repo_name | ref | path | copies | content
---|---|---|---|---
maurus56/musical-engine | refs/heads/master | gen_utils/sequence_generator(opt 2).py | 1 | import numpy as np
#Extrapolates from a given seed sequence
def generate_from_seed(model, seed, sequence_length, data_variance, data_mean):
seedSeq = seed.copy()
output = []
    #The generation algorithm is simple:
    #Step 1 - Given A = [X_0, X_1, ... X_n], generate X_{n+1}
    #Step 2 - Concatenate X_{n+1} onto A
    #Step 3 - Repeat for `sequence_length` iterations
for it in range(sequence_length):
seedSeqNew = model._predict(seedSeq) #Step 1. Generate X_n + 1
#Step 2. Append it to the sequence
if it == 0:
for i in range(seedSeqNew.shape[1]):
output.append(seedSeqNew[0][i].copy())
else:
output.append(seedSeqNew[0][seedSeqNew.shape[1]-1].copy())
newSeq = seedSeqNew[0][seedSeqNew.shape[1]-1]
newSeq = np.reshape(newSeq, (1, 1, newSeq.shape[0]))
seedSeq = np.concatenate((seedSeq, newSeq), axis=1)
    #Finally, post-process the generated sequence so that we have valid frequencies
    #We're essentially just undoing the data standardization: rescale by the
    #variance, then add back the mean
for i in range(len(output)):
output[i] *= data_variance
output[i] += data_mean
return output
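# A minimal usage sketch (hypothetical names; assumes `model._predict` maps a
# (1, timesteps, features) array to an array of the same rank, as the loop
# above expects):
#   seed = np.random.randn(1, 10, 64)  # 10 normalized seed frames, 64 features
#   frames = generate_from_seed(model, seed, sequence_length=100,
#                               data_variance=data_var, data_mean=data_mean)
# `frames` then holds the first prediction pass plus one new frame per step,
# rescaled back to the original data range.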
|
rhgong/itk-with-dom | refs/heads/master | Wrapping/ExternalProjects/ItkVtkGlue/Wrapping/Python/itkvtk.py | 12 | #==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================
import itk, vtk
import ItkVtkGlue
names = [name for name in dir(ItkVtkGlue) if not name.startswith("__")]
for name in names:
setattr(itk, name, ItkVtkGlue.__dict__[name])
# some cleanup
del itk, vtk, ItkVtkGlue, names, name
# also keep ItkVtkGlue members in that namespace
from ItkVtkGlue import *
# display a deprecation warning
import sys
print >> sys.stderr, "Warning: ItkVtkGlue classes are available in the itk module, without importing itkvtk."
print >> sys.stderr, "Warning: itkvtk is no longer supported and will be removed soon."
del sys
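# Typical use of this shim (a sketch; ImageToVTKImageFilter is the usual glue
# class, but the available names depend on how ItkVtkGlue was wrapped):
#   import itk, itkvtk
#   glue = itk.ImageToVTKImageFilter[itk.Image[itk.UC, 2]].New()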
|
Maccimo/intellij-community | refs/heads/master | python/testData/inspections/PyGlobalUndefinedInspection/reassignedAndPresent.py | 12 | bar = "something"
def foo():
global bar
bar = "something else"
|
unitusdev/unitus | refs/heads/master | qa/rpc-tests/mempool_reorg.py | 41 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
alert_filename = None # Set by setup_network
def setup_network(self):
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
start_count = self.nodes[0].getblockcount()
        # Mine four blocks. After this, nodes[0]'s blocks
        # 101, 102, 103, and 104 are spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
        # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
        # and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = create_tx(self.nodes[0], coinbase_txids[1], node1_address, 49.99)
spend_102_raw = create_tx(self.nodes[0], coinbase_txids[2], node0_address, 49.99)
spend_103_raw = create_tx(self.nodes[0], coinbase_txids[3], node0_address, 49.99)
# Create a block-height-locked transaction which will be invalid after reorg
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 49.99})
        # Make the transaction non-final: nSequence must differ from 0xffffffff
        # for nLockTime to be enforced
        timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
        # Set nLockTime (the last four bytes, little-endian) to two blocks ahead
        timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
# Create 102_1 and 103_1:
spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, node1_address, 49.98)
spend_103_1_raw = create_tx(self.nodes[0], spend_103_id, node1_address, 49.98)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
for node in self.nodes:
node.invalidateblock(last_block[0])
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
GHubgenius/Hammer | refs/heads/master | lib/neighborHost_class.py | 3 | #!/usr/bin/python2.7
#coding:utf-8
'''
'''
import os
import urllib
import urllib2
import re
import socket
class NeighborHost(object):
"""docstring for neiborDomain"""
def __init__(self, ip):
super(NeighborHost, self).__init__()
m = re.match('(?<![\.\d])(?:\d{1,3}\.){3}\d{1,3}(?![\.\d])',ip)
if m:
self.ip = m.group(0)
else:
self.domain = ip
self.ip = socket.gethostbyname(ip)
def getFromBing(self, ip=None):
        if ip is None:
            ip = self.ip
        interface_url = 'http://cn.bing.com/search?count=100&q=ip%3a'  # 'ip:' with the colon URL-encoded
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5"
headers["Accept"] = "text/plain"
url = interface_url + ip
req = urllib2.Request(url,headers=headers)
ret = urllib2.urlopen(req).read()
return ret
def getFromChinaZ(self,ip=None):
try:
            if ip is None:
                ip = self.ip
url = 'http://s.tool.chinaz.com/same/'
post = {'s':ip}
post = urllib.urlencode(post)
content = urllib2.urlopen(url, post).read()
hosts = re.findall('target=_blank>([^<]*)</a></li>',content)
return hosts
        except Exception:
return []
# ----------------------------------------------------------------------------------------------------
#
# ----------------------------------------------------------------------------------------------------
if __name__ == '__main__':
nebordom = NeighborHost('61.164.42.190')
    print nebordom.getFromChinaZ()
|
chulchultrain/FriendLeague | refs/heads/master | leagreq/league_curl.py | 1 | import requests
import league_conf
import time
# request_url_map_populate function
# Creates a map of sub-URLs for the queries we may need from the Riot API.
# Ex: to get summoner data from the Riot API, you have to add the
# sub-URL /lol/summoner/v4/summoners/by-name
# This function is where we register all the Riot API queries we may need.
def request_url_map_populate():
request_url_map = {}
request_url_map['match_list'] = '/lol/match/v4/matchlists/by-account'
request_url_map['summoner'] = '/lol/summoner/v4/summoners/by-name'
request_url_map['account'] = '/lol/summoner/v4/summoners/by-account'
request_url_map['match'] = '/lol/match/v4/matches'
request_url_map['matchTimeline'] = '/lol/match/v4/timelines/by-match'
request_url_map['champion'] = 'champion.json'
return request_url_map
# is_static_request function
# determines whether the request type calls Riot's static data API;
# so far only champion data is static
def is_static_request(r_type):
    return r_type == 'champion'
#bunch of globals for use
base_url = 'https://na1.api.riotgames.com'
request_url_map = request_url_map_populate()
static_data_base_url = 'http://ddragon.leagueoflegends.com/cdn/9.23.1/data/en_US'
cj = 'champion.json'
wait_map = {}
# The way the request URL map is set up, champion data can simply be mapped
# to the static-data base URL instead of the main API host.
# add_header_query function
# Creates the substring to be added to the end of a Riot API request URL for
# parameters that we need passed to the query, such as API_KEY
# (our authentication) and beginTime for filtering the actual query results
# before it reaches us.
#
def add_header_query(hq_map):
#TODO
res = '?'
headers = []
for x in hq_map:
if type(hq_map[x]) is list:
for y in hq_map[x]:
headers.append(str(x) + '=' + str(y))
else:
headers.append(str(x) + '=' + str(hq_map[x]))
header_statement = '&'.join(headers)
res += header_statement
return res
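# For example (a sketch; parameter order follows dict iteration order):
#   add_header_query({'season': '4', 'champion': [1, 2]})
#   -> '?season=4&champion=1&champion=2'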
# process_header_map function
# This function is a preprocessing function to be used on a header before it is
# passed into the add_header_query function. First, it performs a type check
# to make sure that the map is indeed a map. Then it adds the API key because
# you will always need the API key in order to make a request upon Riot API.
def process_header_map(header_params):
if type(header_params) is not dict:
h_p_copy = {}
else:
h_p_copy = header_params.copy()
h_p_copy['api_key'] = league_conf.api_key
return h_p_copy
# gen_request
# function to create the actual string that represents the request URL
# for the requested resources indicated by r_type and r_value
# with the additional parameters in the header_params map
def gen_request(r_type,r_value=None,header_params=None):
res = {}
h_p_copy = process_header_map(header_params)
global request_url_map
if(is_static_request(r_type)):
req_str = static_data_base_url + '/' + request_url_map[r_type]
else:
req_str = base_url + request_url_map[r_type] + '/' + str(r_value)
req_str += add_header_query(h_p_copy)
res['req_str'] = req_str
res['r_type'] = r_type
return res
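# For example (a sketch; the api_key value comes from league_conf):
#   gen_request('summoner', 'chulchultrain')
#   -> {'r_type': 'summoner',
#       'req_str': 'https://na1.api.riotgames.com/lol/summoner/v4/summoners'
#                  '/by-name/chulchultrain?api_key=...'}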
def valid_response(response):
return response.status_code == 200
def split_rates(text):
return text.split(',')
def calculate_rate(rate_text):
r_data = rate_text.split(':')
r_count = int(r_data[0])
r_time = int(r_data[1])
return r_count,r_time
def process_rates(text):
rate_text_li = split_rates(text)
r_map = {}
for rate_text in rate_text_li:
r_count, r_time = calculate_rate(rate_text)
r_map[r_time] = r_count
return r_map
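# Riot encodes rate limits as "count:seconds" pairs, e.g. "20:1,100:120"
# means 20 calls per 1 second and 100 calls per 120 seconds, so (a sketch):
#   process_rates("20:1,100:120")  ->  {1: 20, 120: 100}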
def calculate_wait_response(rate_limits,rate_counts):
max_wait = 0
for r_time in rate_limits:
if r_time in rate_counts:
cur_count = rate_counts[r_time]
cur_limit = rate_limits[r_time]
if(cur_count >= cur_limit):
max_wait = max(max_wait,r_time)
return max_wait
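# For example (a sketch): with limits {1: 20, 120: 100} and counts
# {1: 20, 120: 5}, only the per-second bucket is exhausted, so the
# suggested wait is that bucket's window:
#   calculate_wait_response({1: 20, 120: 100}, {1: 20, 120: 5})  ->  1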
def process_app_rate(headers):
# get_app_rate_limit
# get_app_rate_count
# X-App-Rate-Limit
# X-App-Rate-Limit-Count
# check if the app-rate-limit is in the header
if 'X-App-Rate-Limit' not in headers:
return 0
app_rate_limits = process_rates(headers['X-App-Rate-Limit'])
app_rate_counts = process_rates(headers['X-App-Rate-Limit-Count'])
max_wait = calculate_wait_response(app_rate_limits,app_rate_counts)
return max_wait
pass
#
# Per endpoint keep track of the last time the endpoint was called
# and the response from the endpoint in order to calculate "have we waited long enough for the method-rate-limit"
#
#
def process_endpoint_rate(headers):
pass
def add_app_wait_time(wait_time):
if 'app' not in wait_map:
wait_map['app'] = {'wait':0, 'time':0}
wait_map['app']['wait'] = wait_time
wait_map['app']['time'] = time.time()
# Process response headers, which give the rate limits and the current
# counts; any required wait is recorded so the next request sleeps first
# (see proper_wait) rather than sleeping here.
def process_response_headers(headers):
# Process the application rate limit
wait_time = process_app_rate(headers)
if wait_time > 0:
add_app_wait_time(wait_time)
#get_endpoint_rate(headers)
# and also process the endpoint rate limit
pass
#TODO: What should happen in the event of a 4XX error
# request function
#
# high level function that actually requests the desired resource indicated by r_type and r_value
# with the additional parameters in the header params map
#
def retrieve_app_wait():
res = 0
if 'app' in wait_map:
had_to_wait = wait_map['app']['wait']
elapsed = time.time() - wait_map['app']['time']
res = max(had_to_wait - elapsed,0)
return res
def proper_wait(req):
app_wait_time = retrieve_app_wait()
if app_wait_time > 0:
print("SLEEPING ", app_wait_time,"!!!!!")
time.sleep(app_wait_time)
def execute_request(req):
req_str = req['req_str']
r_type = req['r_type']
counter = 0
# print(req_str)
response_json = None
while counter < 2:
proper_wait(req)
response = requests.get(req_str)
# print(response.headers)
if valid_response(response):
response_json = response.json()
counter = 2
else:
print(req_str)
print(response.text)
print(response.headers)
process_response_headers(response.headers) #TODO: IN FUTURE, BECAUSE ENDPOINT HAS DIF RATE LIMIT, MAKE IT HIT DIFFERENTLY FOR THAT ENDPOINT
counter += 1
return response_json
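# For example (a sketch):
#   execute_request(gen_request('match', '2858485810'))
# returns the parsed JSON body on HTTP 200, or None once the single
# rate-limited retry is exhausted.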
# Build in rate limiting
# Once we execute a request,
# We look at the response headers to see if we went over the rate limit.
# If we did, back off for some time in some way depending upon what the headers said
#
# def ex_req(req_str):
# response = requests.get(req_str)
# if valid_response(response):
# proceed as normal
# request
# Top Level function
# requests data from riot api
# r_type : type of request : string
# r_value : value of request : string
# header_params : extra header params in the query : dict
def request(r_type,r_value=None,header_params=None):
req = gen_request(r_type,r_value,header_params)
response_json = execute_request(req)
return response_json
def testing_match_list():
s1 = request('match_list','PhjloNxjxrIOTQ4trcehe8OGiU9ABj933DBGRnO4GBfqNw',{'season':'4'})
assert('matches' in s1)
x = s1['matches']
for y in x:
assert('season' in y)
assert(y['season'] == 4)
def testing_account():
s1 = request('account','PhjloNxjxrIOTQ4trcehe8OGiU9ABj933DBGRnO4GBfqNw')
assert('accountId' in s1)
def test_timeline():
s1 = request('matchTimeline','2858485810')
assert('frameInterval' in s1)
def test_match():
s1 = request('match','2858485810')
assert('queueId' in s1)
def testing_summoner_name_DNE():
s1 = request('summoner','timban')
assert(s1 == None)
def testing_summoner_name_pass():
s1 = request('summoner','chulchultrain',{'beginTime':'1451628000000','season':'4'})
assert( 'accountId' in s1)
def test_static():
res = request('champion')['data']
assert('Aatrox' in res)
def multi_wait_test():
test_subjects = [3218339380,3217748340, 3217743073, 3217030001,3216853512,3216649335,3216644079,3216557548,3215842512,3215807647,3215772284,3215350679,3215146839,
3215114074,3214903763,3214901255,3213701146,3213556389,3213599789,3212815373,3212640620,3211838405,3211844286, 3211830021,3211795291,3211780042,3211704802,3210818094]
# for x in test_subjects:
# request('match',x)
summoner_names = ['chulchultrain','chulminyang','a2y','jehpoody','crysteenah','sbaneling','summontheez','timbangu','starcalls coffee','blanket robber','ilovememundo',
'temporaltempest,','gofortheotherguy','kidhybrid','tankage','gosusummoner','black zealot',]
# for s in summoner_names:
# request('summoner',s)
m_l = request('match_list','PhjloNxjxrIOTQ4trcehe8OGiU9ABj933DBGRnO4GBfqNw')['matches']
for m in m_l:
request('match',m['gameId'])
def testing():
testing_summoner_name_DNE()
testing_summoner_name_pass()
testing_account()
testing_match_list()
test_timeline()
test_match()
multi_wait_test()
pass
if __name__ == "__main__":
testing()
test_static()
|
kiszk/spark | refs/heads/master | examples/src/main/python/mllib/naive_bayes_example.py | 106 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
NaiveBayes Example.
Usage:
`spark-submit --master local[4] examples/src/main/python/mllib/naive_bayes_example.py`
"""
from __future__ import print_function
import shutil
from pyspark import SparkContext
# $example on$
from pyspark.mllib.classification import NaiveBayes, NaiveBayesModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonNaiveBayesExample")
# $example on$
# Load and parse the data file.
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
# Split data approximately into training (60%) and test (40%)
training, test = data.randomSplit([0.6, 0.4])
# Train a naive Bayes model.
model = NaiveBayes.train(training, 1.0)
# Make prediction and test accuracy.
predictionAndLabel = test.map(lambda p: (model.predict(p.features), p.label))
accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()
print('model accuracy {}'.format(accuracy))
# Save and load model
output_dir = 'target/tmp/myNaiveBayesModel'
shutil.rmtree(output_dir, ignore_errors=True)
model.save(sc, output_dir)
sameModel = NaiveBayesModel.load(sc, output_dir)
predictionAndLabel = test.map(lambda p: (sameModel.predict(p.features), p.label))
accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()
print('sameModel accuracy {}'.format(accuracy))
    # $example off$

    sc.stop()
|
zaina/nova | refs/heads/master | nova/api/openstack/compute/plugins/v3/console_auth_tokens.py | 36 | # Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova.i18n import _
ALIAS = "os-console-auth-tokens"
authorize = extensions.os_compute_authorizer(ALIAS)
class ConsoleAuthTokensController(wsgi.Controller):
def __init__(self, *args, **kwargs):
self._consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
super(ConsoleAuthTokensController, self).__init__(*args, **kwargs)
@extensions.expected_errors((400, 401, 404))
def show(self, req, id):
"""Checks a console auth token and returns the related connect info."""
context = req.environ['nova.context']
authorize(context)
token = id
if not token:
msg = _("token not provided")
raise webob.exc.HTTPBadRequest(explanation=msg)
connect_info = self._consoleauth_rpcapi.check_token(context, token)
if not connect_info:
raise webob.exc.HTTPNotFound(explanation=_("Token not found"))
console_type = connect_info.get('console_type')
# This is currently required only for RDP consoles
if console_type != "rdp-html5":
raise webob.exc.HTTPUnauthorized(
explanation=_("The requested console type details are not "
"accessible"))
return {'console':
{i: connect_info[i]
for i in ['instance_uuid', 'host', 'port',
'internal_access_path']
if i in connect_info}}
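    # Example response shape for an rdp-html5 console (a sketch; values
    # depend on what the consoleauth service returns):
    #   {'console': {'instance_uuid': '...', 'host': '...', 'port': ...,
    #                'internal_access_path': '...'}}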
class ConsoleAuthTokens(extensions.V3APIExtensionBase):
"""Console token authentication support."""
name = "ConsoleAuthTokens"
alias = ALIAS
version = 1
def get_resources(self):
controller = ConsoleAuthTokensController()
ext = extensions.ResourceExtension(ALIAS,
controller)
return [ext]
def get_controller_extensions(self):
return []
|
CoolCloud/ansible | refs/heads/devel | lib/ansible/plugins/action/assert.py | 163 | # Copyright 2012, Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.playbook.conditional import Conditional
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
''' Fail with custom message '''
TRANSFERS_FILES = False
    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()

        if 'that' not in self._task.args:
            raise AnsibleError('conditional required in "that" string')
msg = None
if 'msg' in self._task.args:
msg = self._task.args['msg']
# make sure the 'that' items are a list
thats = self._task.args['that']
if not isinstance(thats, list):
thats = [ thats ]
# Now we iterate over the that items, temporarily assigning them
# to the task's when value so we can evaluate the conditional using
# the built in evaluate function. The when has already been evaluated
# by this point, and is not used again, so we don't care about mangling
# that value now
cond = Conditional(loader=self._loader)
for that in thats:
cond.when = [ that ]
test_result = cond.evaluate_conditional(templar=self._templar, all_vars=task_vars)
if not test_result:
result = dict(
failed = True,
evaluated_to = test_result,
assertion = that,
)
if msg:
result['msg'] = msg
return result
return dict(changed=False, msg='all assertions passed')
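# Example playbook task this action plugin handles (a sketch):
#   - assert:
#       that:
#         - "ansible_os_family == 'Debian'"
#       msg: "only Debian-family hosts are supported"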
|
j-silver/quantum_dots | refs/heads/master | entropy_plot.py | 1 | #!/usr/bin/env python
#
# entropy_plot.py
#
import matplotlib.pyplot as plt
import numpy as np
# import csv
fig = plt.figure()
# Reading data
redent = (np.loadtxt('RED-ENTROPY.dat')).T
cpent = (np.loadtxt( 'CP-ENTROPY.dat')).T
redprod = (np.loadtxt('RED-ENTROPY-PROD.dat')).T
cpprod = (np.loadtxt('CP-ENTROPY-PROD.dat')).T
#
# Subplot n.1 : Entropy evolution
#
plt.subplot(211)
plt.title('Entropy time-evolution', fontsize=40)
# Setup labels
plt.xlabel('$t \Delta$', fontsize=40)
plt.ylabel('$S(t)$', rotation='horizontal', fontsize=40)
#
# Text box
#
plt.text(100, 0.1, '$\Omega/\Delta=2$\n$\kappa_B T/\hbar \Delta=0.1$\n$\\alpha=0.005$\n$\\rho_0=\\vert z;-\\rangle$', bbox={'facecolor':'white'})
#plt.text(250, 0.5, '$\Omega/\Delta=2$\n$\kappa_B T/\hbar \Delta=0.1$\n$\\alpha=0.005$\n$\\rho_0=\{1, 0, 0.5, -0.4\}$', bbox={'facecolor':'white'})
# Plotting
rfig = plt.plot(redent[0], redent[1], color='red',
label='Redfield dynamics entropy')
cfig = plt.plot(cpent[0], cpent[1], color='blue',
label='Completely positive dynamics entropy')
# Maximum entropy
maxent = np.log(2.0)*np.ones_like(redent[0])
plt.plot(redent[0], maxent)
plt.grid(True)
plt.legend(('Redfield dynamics entropy',
'Completely positive dynamics entropy'), loc='upper left',
bbox_to_anchor=(0.2, 0.95))
#ax = plt.twinx()
#
# Subplot n.2 : Entropy production
#
plt.subplot(212)
plt.title('Internal entropy production')
plt.xlabel('$t \Delta$')
plt.ylabel('$\sigma(t)$', rotation='horizontal')
rpfig = plt.plot(redprod[0], redprod[1], 'y-', label='Redfield entropy prod.')
cpfig = plt.plot(cpprod[0], cpprod[1], 'c-', label='Completely posit. entropy prod.')
plt.grid(True)
plt.legend(('Redfield entropy prod.', 'Completely posit. entropy prod.'),
loc='upper left', bbox_to_anchor=(0.2, 0.8))
plt.show()
|
SujaySKumar/django | refs/heads/master | tests/flatpages_tests/test_templatetags.py | 309 | from django.contrib.auth.models import AnonymousUser, User
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.template import Context, Template, TemplateSyntaxError
from django.test import TestCase, modify_settings, override_settings
from .settings import FLATPAGES_TEMPLATES
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageTemplateTagTests(TestCase):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
cls.fp1 = FlatPage.objects.create(
url='/flatpage/', title='A Flatpage', content="Isn't it flat!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp2 = FlatPage.objects.create(
url='/location/flatpage/', title='A Nested Flatpage', content="Isn't it flat and deep!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp3 = FlatPage.objects.create(
url='/sekrit/', title='Sekrit Flatpage', content="Isn't it sekrit!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp4 = FlatPage.objects.create(
url='/location/sekrit/', title='Sekrit Nested Flatpage', content="Isn't it sekrit and deep!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp1.sites.add(cls.site1)
cls.fp2.sites.add(cls.site1)
cls.fp3.sites.add(cls.site1)
cls.fp4.sites.add(cls.site1)
def test_get_flatpages_tag(self):
"The flatpage template tag retrieves unregistered prefixed flatpages by default"
out = Template(
"{% load flatpages %}"
"{% get_flatpages as flatpages %}"
"{% for page in flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context())
self.assertEqual(out, "A Flatpage,A Nested Flatpage,")
def test_get_flatpages_tag_for_anon_user(self):
"The flatpage template tag retrieves unregistered flatpages for an anonymous user"
out = Template(
"{% load flatpages %}"
"{% get_flatpages for anonuser as flatpages %}"
"{% for page in flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'anonuser': AnonymousUser()
}))
self.assertEqual(out, "A Flatpage,A Nested Flatpage,")
def test_get_flatpages_tag_for_user(self):
"The flatpage template tag retrieves all flatpages for an authenticated user"
me = User.objects.create_user('testuser', '[email protected]', 's3krit')
out = Template(
"{% load flatpages %}"
"{% get_flatpages for me as flatpages %}"
"{% for page in flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'me': me
}))
self.assertEqual(out, "A Flatpage,A Nested Flatpage,Sekrit Nested Flatpage,Sekrit Flatpage,")
def test_get_flatpages_with_prefix(self):
"The flatpage template tag retrieves unregistered prefixed flatpages by default"
out = Template(
"{% load flatpages %}"
"{% get_flatpages '/location/' as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context())
self.assertEqual(out, "A Nested Flatpage,")
def test_get_flatpages_with_prefix_for_anon_user(self):
"The flatpage template tag retrieves unregistered prefixed flatpages for an anonymous user"
out = Template(
"{% load flatpages %}"
"{% get_flatpages '/location/' for anonuser as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'anonuser': AnonymousUser()
}))
self.assertEqual(out, "A Nested Flatpage,")
def test_get_flatpages_with_prefix_for_user(self):
"The flatpage template tag retrieve prefixed flatpages for an authenticated user"
me = User.objects.create_user('testuser', '[email protected]', 's3krit')
out = Template(
"{% load flatpages %}"
"{% get_flatpages '/location/' for me as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'me': me
}))
self.assertEqual(out, "A Nested Flatpage,Sekrit Nested Flatpage,")
def test_get_flatpages_with_variable_prefix(self):
"The prefix for the flatpage template tag can be a template variable"
out = Template(
"{% load flatpages %}"
"{% get_flatpages location_prefix as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'location_prefix': '/location/'
}))
self.assertEqual(out, "A Nested Flatpage,")
def test_parsing_errors(self):
"There are various ways that the flatpages template tag won't parse"
render = lambda t: Template(t).render(Context())
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages as %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages cheesecake flatpages %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages as flatpages asdf %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages cheesecake user as flatpages %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages for user as flatpages asdf %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages prefix for user as flatpages asdf %}")
|
trabacus-softapps/openerp-8.0-cc | refs/heads/master | openerp/tests/test_mail_examples.py | 56 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
MISC_HTML_SOURCE = """
<font size="2" style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; ">test1</font>
<div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; font-style: normal; ">
<b>test2</b></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<i>test3</i></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<u>test4</u></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<strike>test5</strike></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; ">
<font size="5">test6</font></div><div><ul><li><font color="#1f1f1f" face="monospace" size="2">test7</font></li><li>
<font color="#1f1f1f" face="monospace" size="2">test8</font></li></ul><div><ol><li><font color="#1f1f1f" face="monospace" size="2">test9</font>
</li><li><font color="#1f1f1f" face="monospace" size="2">test10</font></li></ol></div></div>
<blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><div><div><font color="#1f1f1f" face="monospace" size="2">
test11</font></div></div></div></blockquote><blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;">
<blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><font color="#1f1f1f" face="monospace" size="2">
test12</font></div><div><font color="#1f1f1f" face="monospace" size="2"><br></font></div></blockquote></blockquote>
<font color="#1f1f1f" face="monospace" size="2"><a href="http://google.com">google</a></font>
<a href="javascript:alert('malicious code')">test link</a>
"""
EDI_LIKE_HTML_SOURCE = """<div style="font-family: 'Lucica Grande', Ubuntu, Arial, Verdana, sans-serif; font-size: 12px; color: rgb(34, 34, 34); background-color: #FFF; ">
<p>Hello ${object.partner_id.name},</p>
<p>A new invoice is available for you: </p>
<p style="border-left: 1px solid #8e0000; margin-left: 30px;">
<strong>REFERENCES</strong><br />
Invoice number: <strong>${object.number}</strong><br />
Invoice total: <strong>${object.amount_total} ${object.currency_id.name}</strong><br />
Invoice date: ${object.date_invoice}<br />
Order reference: ${object.origin}<br />
Your contact: <a href="mailto:${object.user_id.email or ''}?subject=Invoice%20${object.number}">${object.user_id.name}</a>
</p>
<br/>
<p>It is also possible to directly pay with Paypal:</p>
<a style="margin-left: 120px;" href="${object.paypal_url}">
<img class="oe_edi_paypal_button" src="https://www.paypal.com/en_US/i/btn/btn_paynowCC_LG.gif"/>
</a>
<br/>
<p>If you have any question, do not hesitate to contact us.</p>
<p>Thank you for choosing ${object.company_id.name or 'us'}!</p>
<br/>
<br/>
<div style="width: 375px; margin: 0px; padding: 0px; background-color: #8E0000; border-top-left-radius: 5px 5px; border-top-right-radius: 5px 5px; background-repeat: repeat no-repeat;">
<h3 style="margin: 0px; padding: 2px 14px; font-size: 12px; color: #DDD;">
<strong style="text-transform:uppercase;">${object.company_id.name}</strong></h3>
</div>
<div style="width: 347px; margin: 0px; padding: 5px 14px; line-height: 16px; background-color: #F2F2F2;">
<span style="color: #222; margin-bottom: 5px; display: block; ">
${object.company_id.street}<br/>
${object.company_id.street2}<br/>
${object.company_id.zip} ${object.company_id.city}<br/>
${object.company_id.state_id and ('%s, ' % object.company_id.state_id.name) or ''} ${object.company_id.country_id.name or ''}<br/>
</span>
<div style="margin-top: 0px; margin-right: 0px; margin-bottom: 0px; margin-left: 0px; padding-top: 0px; padding-right: 0px; padding-bottom: 0px; padding-left: 0px; ">
Phone: ${object.company_id.phone}
</div>
<div>
Web : <a href="${object.company_id.website}">${object.company_id.website}</a>
</div>
</div>
</div></body></html>"""
OERP_WEBSITE_HTML_1 = """
<div>
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb16" data-snippet-id="colmd">
<h2>OpenERP HR Features</h2>
<h3 class="text-muted">Manage your company most important asset: People</h3>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-rounded img-responsive" src="/website/static/src/img/china_thumb.jpg">
<h4 class="mt16">Streamline Recruitments</h4>
<p>Post job offers and keep track of each application received. Follow applicants in your recruitment process with the smart kanban view.</p>
<p>Save time by automating some communications with email templates. Resumes are indexed automatically, allowing you to easily find for specific profiles.</p>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-rounded img-responsive" src="/website/static/src/img/desert_thumb.jpg">
<h4 class="mt16">Enterprise Social Network</h4>
<p>Break down information silos. Share knowledge and best practices amongst all employees. Follow specific people or documents and join groups of interests to share expertise and documents.</p>
<p>Interact with your collegues in real time with live chat.</p>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-rounded img-responsive" src="/website/static/src/img/deers_thumb.jpg">
<h4 class="mt16">Leaves Management</h4>
<p>Keep track of the vacation days accrued by each employee. Employees enter their requests (paid holidays, sick leave, etc), for managers to approve and validate. It's all done in just a few clicks. The agenda of each employee is updated accordingly.</p>
</div>
</div>
</div>
</div>"""
OERP_WEBSITE_HTML_1_IN = [
'Manage your company most important asset: People',
'img class="img-rounded img-responsive" src="/website/static/src/img/china_thumb.jpg"',
]
OERP_WEBSITE_HTML_1_OUT = [
'Break down information silos.',
'Keep track of the vacation days accrued by each employee',
'img class="img-rounded img-responsive" src="/website/static/src/img/deers_thumb.jpg',
]
OERP_WEBSITE_HTML_2 = """
<div class="mt16 cke_widget_editable cke_widget_element oe_editable oe_dirty" data-oe-model="blog.post" data-oe-id="6" data-oe-field="content" data-oe-type="html" data-oe-translate="0" data-oe-expression="blog_post.content" data-cke-widget-data="{}" data-cke-widget-keep-attr="0" data-widget="oeref" contenteditable="true" data-cke-widget-editable="text">
<section class="mt16 mb16" data-snippet-id="text-block">
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb32" data-snippet-id="colmd">
<h2>
OpenERP Project Management
</h2>
<h3 class="text-muted">Infinitely flexible. Incredibly easy to use.</h3>
</div>
<div class="col-md-12 mb16 mt16" data-snippet-id="colmd">
<p>
OpenERP's <b>collaborative and realtime</b> project
management helps your team get work done. Keep
track of everything, from the big picture to the
minute details, from the customer contract to the
billing.
</p><p>
Organize projects around <b>your own processes</b>. Work
on tasks and issues using the kanban view, schedule
tasks using the gantt chart and control deadlines
in the calendar view. Every project may have it's
own stages allowing teams to optimize their job.
</p>
</div>
</div>
</div>
</section>
<section class="" data-snippet-id="image-text">
<div class="container">
<div class="row">
<div class="col-md-6 mt16 mb16" data-snippet-id="colmd">
<img class="img-responsive shadow" src="/website/static/src/img/image_text.jpg">
</div>
<div class="col-md-6 mt32" data-snippet-id="colmd">
<h3>Manage Your Shops</h3>
<p>
OpenERP's Point of Sale introduces a super clean
interface with no installation required that runs
online and offline on modern hardwares.
</p><p>
It's full integration with the company inventory
and accounting, gives you real time statistics and
consolidations amongst all shops without the hassle
of integrating several applications.
</p>
</div>
</div>
</div>
</section>
<section class="" data-snippet-id="text-image">
<div class="container">
<div class="row">
<div class="col-md-6 mt32" data-snippet-id="colmd">
<h3>Enterprise Social Network</h3>
<p>
Make every employee feel more connected and engaged
with twitter-like features for your own company. Follow
people, share best practices, 'like' top ideas, etc.
</p><p>
Connect with experts, follow what interests you, share
documents and promote best practices with OpenERP
Social application. Get work done with effective
collaboration across departments, geographies
and business applications.
</p>
</div>
<div class="col-md-6 mt16 mb16" data-snippet-id="colmd">
<img class="img-responsive shadow" src="/website/static/src/img/text_image.png">
</div>
</div>
</div>
</section><section class="" data-snippet-id="portfolio">
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb32" data-snippet-id="colmd">
<h2>Our Porfolio</h2>
<h4 class="text-muted">More than 500 successful projects</h4>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/deers.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/deers.jpg">
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/landscape.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
</div>
</div>
</div>
</section>
</div>
"""
OERP_WEBSITE_HTML_2_IN = [
'management helps your team get work done',
]
OERP_WEBSITE_HTML_2_OUT = [
'Make every employee feel more connected',
'img class="img-responsive shadow" src="/website/static/src/img/text_image.png',
]
TEXT_1 = """I contact you about our meeting tomorrow. Here is the schedule I propose:
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?
--
MySignature"""
TEXT_1_IN = ["""I contact you about our meeting tomorrow. Here is the schedule I propose:
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?"""]
TEXT_1_OUT = ["""--
MySignature"""]
TEXT_2 = """Salut Raoul!
Le 28 oct. 2012 à 00:02, Raoul Grosbedon a écrit :
> I contact you about our meeting tomorrow. Here is the schedule I propose: (quote)
Of course. This seems viable.
> 2012/10/27 Bert Tartopoils :
>> blahblahblah (quote)?
>>
>> blahblahblah (quote)
>>
>> Bert TARTOPOILS
>> [email protected]
>>
>
>
> --
> RaoulSignature
Bert TARTOPOILS
[email protected]
"""
TEXT_2_IN = ["Salut Raoul!", "Of course. This seems viable."]
TEXT_2_OUT = ["I contact you about our meeting tomorrow. Here is the schedule I propose: (quote)",
"""> 2012/10/27 Bert Tartopoils :
>> blahblahblah (quote)?
>>
>> blahblahblah (quote)
>>
>> Bert TARTOPOILS
>> [email protected]
>>
>
>
> --
> RaoulSignature"""]
HTML_1 = """<p>I contact you about our meeting for tomorrow. Here is the schedule I propose: (keep)
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?
--
MySignature</p>"""
HTML_1_IN = ["""I contact you about our meeting for tomorrow. Here is the schedule I propose: (keep)
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?"""]
HTML_1_OUT = ["""--
MySignature"""]
HTML_2 = """<div>
<font><span>I contact you about our meeting for tomorrow. Here is the schedule I propose:</span></font>
</div>
<div>
<ul>
<li><span>9 AM: brainstorming about our new amazing business app</span></li>
<li><span>9.45 AM: summary</span></li>
<li><span>10 AM: meeting with Fabien to present our app</span></li>
</ul>
</div>
<div>
<font><span>Is everything ok for you ?</span></font>
</div>"""
HTML_2_IN = ["<font><span>I contact you about our meeting for tomorrow. Here is the schedule I propose:</span></font>",
"<li><span>9 AM: brainstorming about our new amazing business app</span></li>",
"<li><span>9.45 AM: summary</span></li>",
"<li><span>10 AM: meeting with Fabien to present our app</span></li>",
"<font><span>Is everything ok for you ?</span></font>"]
HTML_2_OUT = []
HTML_3 = """<div><pre>This is an answer.
Regards,
XXXXXX
----- Mail original -----</pre>
<pre>Hi,
My CRM-related question.
Regards,
XXXX</pre></div>"""
HTML_3_IN = ["""<div><pre>This is an answer.
Regards,
XXXXXX
----- Mail original -----</pre>"""]
HTML_3_OUT = ["Hi,", "My CRM-related question.",
"Regards,"]
HTML_4 = """
<div>
<div>Hi Nicholas,</div>
<br>
<div>I'm free now. 00447710085916.</div>
<br>
<div>Regards,</div>
<div>Nicholas</div>
<br>
<span id="OLK_SRC_BODY_SECTION">
<div style="font-family:Calibri; font-size:11pt; text-align:left; color:black; BORDER-BOTTOM: medium none; BORDER-LEFT: medium none; PADDING-BOTTOM: 0in; PADDING-LEFT: 0in; PADDING-RIGHT: 0in; BORDER-TOP: #b5c4df 1pt solid; BORDER-RIGHT: medium none; PADDING-TOP: 3pt">
<span style="font-weight:bold">From: </span>OpenERP Enterprise <<a href="mailto:[email protected]">[email protected]</a>><br><span style="font-weight:bold">Reply-To: </span><<a href="mailto:[email protected]">[email protected]</a>><br><span style="font-weight:bold">Date: </span>Wed, 17 Apr 2013 13:30:47 +0000<br><span style="font-weight:bold">To: </span>Microsoft Office User <<a href="mailto:[email protected]">[email protected]</a>><br><span style="font-weight:bold">Subject: </span>Re: your OpenERP.com registration<br>
</div>
<br>
<div>
<p>Hello Nicholas Saxlund, </p>
<p>I noticed you recently registered to our OpenERP Online solution. </p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ?
</p>
<p>Best regards, </p>
<pre><a href="http://openerp.com">http://openerp.com</a>
Belgium: +32.81.81.37.00
U.S.: +1 (650) 307-6736
India: +91 (79) 40 500 100
</pre>
</div>
</span>
</div>"""
HTML_5 = """<div><pre>Hi,
I have downloaded OpenERP installer 7.0 and successfully installed the postgresql server and the OpenERP.
I created a database and started to install module by log in as administrator.
However, I was not able to install any module due to "OpenERP Server Error" as shown in the attachement.
Could you please let me know how could I fix this problem?
Regards,
Goh Sin Yih
________________________________
From: OpenERP Enterprise <[email protected]>
To: [email protected]
Sent: Friday, February 8, 2013 12:46 AM
Subject: Feedback From Your OpenERP Trial
Hello Goh Sin Yih,
Thank you for having tested OpenERP Online.
I noticed you started a trial of OpenERP Online (gsy) but you did not decide to keep using it.
So, I just wanted to get in touch with you to get your feedback. Can you tell me what kind of application you were you looking for and why you didn't decide to continue with OpenERP?
Thanks in advance for providing your feedback,
Do not hesitate to contact me if you have any questions,
Thanks,
</pre>"""
GMAIL_1 = """Hello,<div><br></div><div>Ok for me. I am replying directly in gmail, without signature.</div><div><br></div><div>Kind regards,</div><div><br></div><div>Demo.<br><br><div>On Thu, Nov 8, 2012 at 5:29 PM, <span><<a href="mailto:[email protected]">[email protected]</a>></span> wrote:<br><blockquote><div>I contact you about our meeting for tomorrow. Here is the schedule I propose:</div><div><ul><li>9 AM: brainstorming about our new amazing business app</span></li></li>
<li>9.45 AM: summary</li><li>10 AM: meeting with Fabien to present our app</li></ul></div><div>Is everything ok for you ?</div>
<div><p>--<br>Administrator</p></div>
<div><p>Log in our portal at: <a href="http://localhost:8069#action=login&db=mail_1&login=demo">http://localhost:8069#action=login&db=mail_1&login=demo</a></p></div>
</blockquote></div><br></div>"""
GMAIL_1_IN = ['Ok for me. I am replying directly in gmail, without signature.']
GMAIL_1_OUT = ['Administrator', 'Log in our portal at:']
THUNDERBIRD_1 = """<div>On 11/08/2012 05:29 PM,
<a href="mailto:[email protected]">[email protected]</a> wrote:<br></div>
<blockquote>
<div>I contact you about our meeting for tomorrow. Here is the
schedule I propose:</div>
<div>
<ul><li>9 AM: brainstorming about our new amazing business
app</span></li></li>
<li>9.45 AM: summary</li>
<li>10 AM: meeting with Fabien to present our app</li>
</ul></div>
<div>Is everything ok for you ?</div>
<div>
<p>--<br>
Administrator</p>
</div>
<div>
<p>Log in our portal at:
<a href="http://localhost:8069#action=login&db=mail_1&token=rHdWcUART5PhEnJRaXjH">http://localhost:8069#action=login&db=mail_1&token=rHdWcUART5PhEnJRaXjH</a></p>
</div>
</blockquote>
Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.<br><br>
Did you receive my email about my new laptop, by the way ?<br><br>
Raoul.<br><pre>--
Raoul Grosbedonnée
</pre>"""
THUNDERBIRD_1_IN = ['Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.']
THUNDERBIRD_1_OUT = ['I contact you about our meeting for tomorrow.', 'Raoul Grosbedon']
HOTMAIL_1 = """<div>
<div dir="ltr"><br>
I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly.
<br> <br>Kindest regards,<br>xxx<br>
<div>
<div id="SkyDrivePlaceholder">
</div>
<hr id="stopSpelling">
Subject: Re: your OpenERP.com registration<br>From: [email protected]<br>To: [email protected]<br>Date: Wed, 27 Mar 2013 17:12:12 +0000
<br><br>
Hello xxx,
<br>
I noticed you recently created an OpenERP.com account to access OpenERP Apps.
<br>
You indicated that you wish to use OpenERP in your own company.
We would like to know more about your your business needs and requirements, and see how
we can help you. When would you be available to discuss your project ?<br>
Best regards,<br>
<pre>
<a href="http://openerp.com" target="_blank">http://openerp.com</a>
Belgium: +32.81.81.37.00
U.S.: +1 (650) 307-6736
India: +91 (79) 40 500 100
</pre>
</div>
</div>
</div>"""
HOTMAIL_1_IN = ["I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly."]
HOTMAIL_1_OUT = ["Subject: Re: your OpenERP.com registration", " I noticed you recently created an OpenERP.com account to access OpenERP Apps.",
"We would like to know more about your your business needs and requirements", "Belgium: +32.81.81.37.00"]
MSOFFICE_1 = """
<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module.
We are a company of 25 engineers providing product design services to clients.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
I’ll install on a windows server and run a very limited trial to see how it works.
If we adopt OpenERP we will probably move to Linux or look for a hosted SaaS option.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
<br>
I am also evaluating Adempiere and maybe others.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
I expect the trial will take 2-3 months as this is not a high priority for us.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
Alan
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in">
<p class="MsoNormal">
<b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
From:
</span></b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
OpenERP Enterprise [mailto:[email protected]]
<br><b>Sent:</b> Monday, 11 March, 2013 14:47<br><b>To:</b> Alan Widmer<br><b>Subject:</b> Re: your OpenERP.com registration
</span>
</p>
<p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Alan Widmer, </p>
<p></p>
<p>I noticed you recently downloaded OpenERP. </p>
<p></p>
<p>
Uou mentioned you wish to use OpenERP in your own company. Please let me more about your
business needs and requirements? When will you be available to discuss about your project?
</p>
<p></p>
<p>Thanks for your interest in OpenERP, </p>
<p></p>
<p>Feel free to contact me if you have any questions, </p>
<p></p>
<p>Looking forward to hear from you soon. </p>
<p></p>
<pre><p> </p></pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre><a href="http://openerp.com">http://openerp.com</a><p></p></pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
MSOFFICE_1_IN = ['Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module.']
MSOFFICE_1_OUT = ['I noticed you recently downloaded OpenERP.', 'Uou mentioned you wish to use OpenERP in your own company.', 'Belgium: +32.81.81.37.00']
MSOFFICE_2 = """
<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Nicolas,</span></p><p></p>
<p></p>
<p class="MsoNormal" style="text-indent:.5in">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">We are currently investigating the possibility of moving away from our current ERP </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Thank You</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Matt</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Raoul Petitpoil</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Poil Industries</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Information Technology</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">920 Super Street</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Sanchez, Pa 17046 USA</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Tel: xxx.xxx</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Fax: xxx.xxx</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Email: </span>
<a href="mailto:[email protected]">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:blue">[email protected]</span>
</a>
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">www.poilindustries.com</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">www.superproducts.com</span></p><p></p>
<p></p>
</div>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in">
<p class="MsoNormal">
<b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">From:</span>
</b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif""> OpenERP Enterprise [mailto:[email protected]] <br><b>Sent:</b> Wednesday, April 17, 2013 1:31 PM<br><b>To:</b> Matt Witters<br><b>Subject:</b> Re: your OpenERP.com registration</span></p><p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Raoul Petitpoil, </p>
<p></p>
<p>I noticed you recently downloaded OpenERP. </p>
<p></p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ? </p>
<p></p>
<p>Best regards, </p>
<p></p>
<pre> <p> </p>
</pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre> <a href="http://openerp.com">http://openerp.com</a>
<p></p>
</pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
MSOFFICE_2_IN = ['We are currently investigating the possibility']
MSOFFICE_2_OUT = ['I noticed you recently downloaded OpenERP.', 'You indicated that you wish', 'Belgium: +32.81.81.37.00']
MSOFFICE_3 = """<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Hi Nicolas !</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Yes I’d be glad to hear about your offers as we struggle every year with the planning/approving of LOA. </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">I saw your boss yesterday on tv and immediately wanted to test the interface. </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<p class="MsoNormal">
<b>
<span lang="NL-BE" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Bien à vous, </span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="NL-BE" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Met vriendelijke groeten, </span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Best regards,</span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">
</span></b></p><p><b> </b></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">R. Petitpoil <br></span>
</b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Human Resource Manager<b><br><br>Field Resource s.a n.v. <i> <br></i></b>Hermesstraat 6A <br>1930 Zaventem</span>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Tahoma","sans-serif";color:gray"><br></span>
<b>
<span lang="FR" style="font-size:10.0pt;font-family:Wingdings;color:#1F497D">(</span>
</b>
<b>
<span lang="FR" style="font-size:9.0pt;font-family:Wingdings;color:#1F497D"> </span>
</b>
<b>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">xxx.xxx </span>
</b>
<b>
<span lang="EN-GB" style="font-size:9.0pt;font-family:"Trebuchet MS","sans-serif";color:gray"><br></span>
</b>
<b>
<span lang="FR" style="font-size:10.0pt;font-family:"Wingdings 2";color:#1F497D">7</span>
</b>
<b>
<span lang="FR" style="font-size:9.0pt;font-family:"Wingdings 2";color:#1F497D"> </span>
</b>
<b>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">+32 2 727.05.91<br></span>
</b>
<span lang="EN-GB" style="font-size:24.0pt;font-family:Webdings;color:green">P</span>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Tahoma","sans-serif";color:green"> <b> </b></span>
<b>
<span lang="EN-GB" style="font-size:9.0pt;font-family:"Trebuchet MS","sans-serif";color:green">Please consider the environment before printing this email.</span>
</b>
<span lang="EN-GB" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:navy"> </span>
<span lang="EN-GB" style="font-family:"Calibri","sans-serif";color:navy">
</span></p><p></p>
<p></p>
</div>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0cm 0cm 0cm">
<p class="MsoNormal">
<b>
<span lang="FR" style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span>
</b>
<span lang="FR" style="font-size:10.0pt;font-family:"Tahoma","sans-serif""> OpenERP Enterprise [mailto:[email protected]] <br><b>Envoyé :</b> jeudi 18 avril 2013 11:31<br><b>À :</b> Paul Richard<br><b>Objet :</b> Re: your OpenERP.com registration</span></p><p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Raoul PETITPOIL, </p>
<p></p>
<p>I noticed you recently registered to our OpenERP Online solution. </p>
<p></p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ? </p>
<p></p>
<p>Best regards, </p>
<p></p>
<pre> <p> </p>
</pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre> <a href="http://openerp.com">http://openerp.com</a>
<p></p>
</pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
MSOFFICE_3_IN = ['I saw your boss yesterday']
MSOFFICE_3_OUT = ['I noticed you recently registered to our OpenERP Online solution.', 'You indicated that you wish', 'Belgium: +32.81.81.37.00']
# ------------------------------------------------------------
# Test cases coming from bugs
# ------------------------------------------------------------
# bug: read more not apparent, strange message in read more span
BUG1 = """<pre>Hi Migration Team,
Paragraph 1, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Paragraph 2, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Paragraph 3, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Thanks.
Regards,
--
Olivier Laurent
Migration Manager
OpenERP SA
Chaussée de Namur, 40
B-1367 Gérompont
Tel: +32.81.81.37.00
Web: http://www.openerp.com</pre>"""
BUG_1_IN = [
'Hi Migration Team',
'Paragraph 1'
]
BUG_1_OUT = [
'Olivier Laurent',
'Chaussée de Namur',
'81.81.37.00',
'openerp.com',
]
BUG2 = """
<div>
<br>
<div class="moz-forward-container"><br>
<br>
-------- Original Message --------
<table class="moz-email-headers-table" border="0" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Subject:
</th>
<td>Fwd: TR: OpenERP S.A. Payment Reminder</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Date: </th>
<td>Wed, 16 Oct 2013 14:11:13 +0200</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">From: </th>
<td>Christine Herrmann <a class="moz-txt-link-rfc2396E" href="mailto:[email protected]"><[email protected]></a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">To: </th>
<td><a class="moz-txt-link-abbreviated" href="mailto:[email protected]">[email protected]</a></td>
</tr>
</tbody>
</table>
<br>
<br>
<br>
<div class="moz-forward-container"><br>
<br>
-------- Message original --------
<table class="moz-email-headers-table" border="0" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Sujet:
</th>
<td>TR: OpenERP S.A. Payment Reminder</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Date :
</th>
<td>Wed, 16 Oct 2013 10:34:45 -0000</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">De : </th>
<td>Ida Siwatala <a class="moz-txt-link-rfc2396E" href="mailto:[email protected]"><[email protected]></a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Répondre
à : </th>
<td><a class="moz-txt-link-abbreviated" href="mailto:[email protected]">[email protected]</a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Pour :
</th>
<td>Christine Herrmann (che) <a class="moz-txt-link-rfc2396E" href="mailto:[email protected]"><[email protected]></a></td>
</tr>
</tbody>
</table>
<br>
<br>
<div>
<div class="WordSection1">
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Bonjour,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Pourriez-vous
me faire un retour sur ce point.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cordialement</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<div>
<div style="border:none;border-top:solid #B5C4DF
1.0pt;padding:3.0pt 0cm 0cm 0cm">
<p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span></b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
Ida Siwatala [<a class="moz-txt-link-freetext" href="mailto:[email protected]">mailto:[email protected]</a>]
<br>
<b>Envoyé :</b> vendredi 4 octobre 2013 20:03<br>
<b>À :</b> 'Followers of
INZO-services-8-all-e-Maxime-Lisbonne-77176-Savigny-le-temple-France'<br>
<b>Objet :</b> RE: OpenERP S.A. Payment Reminder</span></p>
</div>
</div>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Bonsoir,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Je
me permets de revenir vers vous par écrit , car j’ai
fait 2 appels vers votre service en exposant mon
problème, mais je n’ai pas eu de retour.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cela
fait un mois que j’ai fait la souscription de votre
produit, mais je me rends compte qu’il est pas adapté à
ma situation ( fonctionnalité manquante et surtout je
n’ai pas beaucoup de temps à passer à résoudre des
bugs). </span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">C’est
pourquoi , j’ai demandé qu’un accord soit trouvé avec
vous pour annuler le contrat (tout en vous payant le
mois d’utilisation de septembre).</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Pourriez-vous
me faire un retour sur ce point.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cordialement,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Ida
Siwatala</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span></b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
<a href="mailto:[email protected]">[email protected]</a>
[<a href="mailto:[email protected]">mailto:[email protected]</a>]
<br>
<b>Envoyé :</b> vendredi 4 octobre 2013 17:41<br>
<b>À :</b> <a href="mailto:[email protected]">[email protected]</a><br>
<b>Objet :</b> OpenERP S.A. Payment Reminder</span></p>
<p> </p>
<div>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Dear
INZO services,</span></p>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Exception
made if there was a mistake of ours, it seems that the
following amount stays unpaid. Please, take
appropriate measures in order to carry out this
payment in the next 8 days. </span></p>
<p class="MsoNormal" style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222"></span></p>
<p> </p>
<table class="MsoNormalTable" style="width:100.0%;border:outset 1.5pt" width="100%" border="1" cellpadding="0">
<tbody>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Date de facturation</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Description</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Reference</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Due Date</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Amount (€)</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Lit.</p>
</td>
</tr>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013-09-24</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013/1121</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>Enterprise - Inzo Services
- Juillet 2013</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013-09-24</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>420.0</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt"><br>
</td>
</tr>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
</tr>
</tbody>
</table>
<p class="MsoNormal" style="text-align:center;background:white" align="center"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Amount
due : 420.00 € </span></p>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Would
your payment have been carried out after this mail was
sent, please ignore this message. Do not hesitate to
contact our accounting department. </span></p>
<p class="MsoNormal" style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222"><br>
Best Regards, <br>
Aurore Lesage <br>
OpenERP<br>
Chaussée de Namur, 40 <br>
B-1367 Grand Rosières <br>
Tel: +32.81.81.37.00 - Fax: +32.81.73.35.01 <br>
E-mail : <a href="mailto:[email protected]">[email protected]</a> <br>
Web: <a href="http://www.openerp.com">http://www.openerp.com</a></span></p>
</div>
</div>
</div>
--<br>
INZO services <small>Sent by <a style="color:inherit" href="http://www.openerp.com">OpenERP
S.A.</a> using <a style="color:inherit" href="https://www.openerp.com/">OpenERP</a>.</small>
<small>Access your messages and documents <a style="color:inherit" href="https://accounts.openerp.com?db=openerp#action=mail.action_mail_redirect&login=che&message_id=5750830">in
OpenERP</a></small> <br>
<pre class="moz-signature" cols="72">--
Christine Herrmann
OpenERP
Chaussée de Namur, 40
B-1367 Grand Rosières
Tel: +32.81.81.37.00 - Fax: +32.81.73.35.01
Web: <a class="moz-txt-link-freetext" href="http://www.openerp.com">http://www.openerp.com</a> </pre>
<br>
</div>
<br>
<br>
</div>
<br>
</div>"""
BUG_2_IN = [
'read more',
'...',
]
BUG_2_OUT = [
    'Fwd: TR: OpenERP S.A',
    'fait un mois',
]
|
asgard-lab/neutron | refs/heads/master | neutron/api/v2/router.py | 6 | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import wsgi as base_wsgi
import routes as routes_mapper
import six
import six.moves.urllib.parse as urlparse
import webob
import webob.dec
import webob.exc
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron import manager
from neutron import policy
from neutron.quota import resource_registry
from neutron import wsgi
LOG = logging.getLogger(__name__)
RESOURCES = {'network': 'networks',
'subnet': 'subnets',
'subnetpool': 'subnetpools',
'port': 'ports'}
SUB_RESOURCES = {}
COLLECTION_ACTIONS = ['index', 'create']
MEMBER_ACTIONS = ['show', 'update', 'delete']
REQUIREMENTS = {'id': attributes.UUID_PATTERN, 'format': 'json'}
class Index(wsgi.Application):
def __init__(self, resources):
self.resources = resources
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
metadata = {}
layout = []
for name, collection in six.iteritems(self.resources):
href = urlparse.urljoin(req.path_url, collection)
resource = {'name': name,
'collection': collection,
'links': [{'rel': 'self',
'href': href}]}
layout.append(resource)
response = dict(resources=layout)
content_type = req.best_match_content_type()
body = wsgi.Serializer(metadata=metadata).serialize(response,
content_type)
return webob.Response(body=body, content_type=content_type)
class APIRouter(base_wsgi.Router):
@classmethod
def factory(cls, global_config, **local_config):
return cls(**local_config)
def __init__(self, **local_config):
mapper = routes_mapper.Mapper()
plugin = manager.NeutronManager.get_plugin()
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
ext_mgr.extend_resources("2.0", attributes.RESOURCE_ATTRIBUTE_MAP)
col_kwargs = dict(collection_actions=COLLECTION_ACTIONS,
member_actions=MEMBER_ACTIONS)
def _map_resource(collection, resource, params, parent=None):
allow_bulk = cfg.CONF.allow_bulk
allow_pagination = cfg.CONF.allow_pagination
allow_sorting = cfg.CONF.allow_sorting
controller = base.create_resource(
collection, resource, plugin, params, allow_bulk=allow_bulk,
parent=parent, allow_pagination=allow_pagination,
allow_sorting=allow_sorting)
path_prefix = None
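            # sub-resources are nested under their parent collection,
            # e.g. /<parent_collection>/{<parent_member>_id}/<collection>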
if parent:
path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'],
parent['member_name'],
collection)
mapper_kwargs = dict(controller=controller,
requirements=REQUIREMENTS,
path_prefix=path_prefix,
**col_kwargs)
return mapper.collection(collection, resource,
**mapper_kwargs)
mapper.connect('index', '/', controller=Index(RESOURCES))
for resource in RESOURCES:
_map_resource(RESOURCES[resource], resource,
attributes.RESOURCE_ATTRIBUTE_MAP.get(
RESOURCES[resource], dict()))
resource_registry.register_resource_by_name(resource)
for resource in SUB_RESOURCES:
_map_resource(SUB_RESOURCES[resource]['collection_name'], resource,
attributes.RESOURCE_ATTRIBUTE_MAP.get(
SUB_RESOURCES[resource]['collection_name'],
dict()),
SUB_RESOURCES[resource]['parent'])
# Certain policy checks require that the extensions are loaded
# and the RESOURCE_ATTRIBUTE_MAP populated before they can be
# properly initialized. This can only be claimed with certainty
# once this point in the code has been reached. In the event
# that the policies have been initialized before this point,
# calling reset will cause the next policy check to
# re-initialize with all of the required data in place.
policy.reset()
super(APIRouter, self).__init__(mapper)
|
koditraquinas/koditraquinas.repository | refs/heads/master | plugin.video.traquinas/resources/lib/resolvers/turbovideos.py | 23 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from resources.lib.libraries import client
from resources.lib.libraries import jsunpack
def resolve(url):
try:
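        # extract the video id and rebuild a canonical embed page URL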
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://turbovideos.net/embed-%s.html' % url
result = client.request(url)
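        # the stream URL is sometimes exposed directly as a player 'file' option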
url = re.compile('file *: *"(.+?)"').findall(result)
if len(url) > 0: return url[0]
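        # otherwise the player setup is hidden inside packed p,a,c,k,e,d javascript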
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
result = jsunpack.unpack(result)
url = client.parseDOM(result, 'embed', ret='src')
url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
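        # drop subtitle tracks and normalise the scheme of the first candidate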
url = [i for i in url if not i.endswith('.srt')]
url = 'http://' + url[0].split('://', 1)[-1]
return url
except:
return
|
GrandmasterK/XScheduler | refs/heads/master | venv/lib/python2.7/site-packages/flask/signals.py | 783 | # -*- coding: utf-8 -*-
"""
flask.signals
~~~~~~~~~~~~~
Implements signals based on blinker if available, otherwise
falls silently back to a noop
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
signals_available = False
try:
from blinker import Namespace
signals_available = True
except ImportError:
class Namespace(object):
def signal(self, name, doc=None):
return _FakeSignal(name, doc)
class _FakeSignal(object):
"""If blinker is unavailable, create a fake class with the same
interface that allows sending of signals but will fail with an
error on anything else. Instead of doing anything on send, it
will just ignore the arguments and do nothing instead.
"""
def __init__(self, name, doc=None):
self.name = name
self.__doc__ = doc
def _fail(self, *args, **kwargs):
raise RuntimeError('signalling support is unavailable '
'because the blinker library is '
'not installed.')
send = lambda *a, **kw: None
connect = disconnect = has_receivers_for = receivers_for = \
temporarily_connected_to = connected_to = _fail
del _fail
# the namespace for code signals. If you are not flask code, do
# not put signals in here. Create your own namespace instead.
_signals = Namespace()
# core signals. For usage examples grep the sourcecode or consult
# the API documentation in docs/api.rst as well as docs/signals.rst
template_rendered = _signals.signal('template-rendered')
request_started = _signals.signal('request-started')
request_finished = _signals.signal('request-finished')
request_tearing_down = _signals.signal('request-tearing-down')
got_request_exception = _signals.signal('got-request-exception')
appcontext_tearing_down = _signals.signal('appcontext-tearing-down')
appcontext_pushed = _signals.signal('appcontext-pushed')
appcontext_popped = _signals.signal('appcontext-popped')
message_flashed = _signals.signal('message-flashed')
|
cr/fxos-certsuite | refs/heads/master | web-platform-tests/tests/webdriver/user_input/__init__.py | 12133432 | |
thnee/ansible | refs/heads/devel | test/units/modules/cloud/__init__.py | 12133432 | |
mortada/numpy | refs/heads/master | tools/win32build/misc/x86analysis.py | 75 | #! /usr/bin/env python
# Last Change: Sat Mar 28 02:00 AM 2009 J
# Try to identify instruction set used in binary (x86 only). This works by
# checking the assembly for instructions specific to sse, etc... Obviously,
# this won't work all the times (for example, if some instructions are used
# only after proper detection of the running CPU, this will give false alarm).
from __future__ import division, print_function
import sys
import re
import os
import subprocess
import optparse
I486_SET = ["cmpxchg", "xadd", "bswap", "invd", "wbinvd", "invlpg"]
I586_SET = ["rdmsr", "wrmsr", "rdtsc", "cmpxch8B", "rsm"]
PPRO_SET = ["cmovcc", "fcmovcc", "fcomi", "fcomip", "fucomi", "fucomip", "rdpmc", "ud2"]
MMX_SET = ["emms", "movd", "movq", "packsswb", "packssdw", "packuswb", "paddb",
"paddw", "paddd", "paddsb", "paddsw", "paddusb", "paddusw", "pand",
"pandn", "pcmpeqb", "pcmpeqw", "pcmpeqd", "pcmpgtb", "pcmpgtw",
"pcmpgtd", "pmaddwd", "pmulhw", "pmullw", "por", "psllw", "pslld",
"psllq", "psraw", "psrad", "psrlw", "psrld", "psrlq", "psubb", "psubw",
"psubd", "psubsb", "psubsw", "psubusb", "psubusw", "punpckhbw",
"punpckhwd", "punpckhdq", "punpcklbw", "punpcklwd", "punpckldq",
"pxor"]
SSE_SET = ["addps", "addss", "andnps", "andps", "cmpps", "cmpss", "comiss",
"cvtpi2ps", "cvtps2pi", "cvtsi2ss", "cvtss2si", "cvttps2pi",
"cvttss2si", "divps", "divss", "fxrstor", "fxsave", "ldmxcsr", "maxps",
"maxss", "minps", "minss", "movaps", "movhlps", "movhps", "movlhps",
"movlps", "movmskps", "movss", "movups", "mulps", "mulss", "orps",
"pavgb", "pavgw", "psadbw", "rcpps", "rcpss", "rsqrtps", "rsqrtss",
"shufps", "sqrtps", "sqrtss", "stmxcsr", "subps", "subss", "ucomiss",
"unpckhps", "unpcklps", "xorps", "pextrw", "pinsrw", "pmaxsw",
"pmaxub", "pminsw", "pminub", "pmovmskb", "pmulhuw", "pshufw",
"maskmovq", "movntps", "movntq", "prefetch", "sfence"]
SSE2_SET = ["addpd", "addsd", "andnpd", "andpd", "clflush", "cmppd", "cmpsd",
"comisd", "cvtdq2pd", "cvtdq2ps", "cvtpd2pi", "cvtpd2pq", "cvtpd2ps",
"cvtpi2pd", "cvtps2dq", "cvtps2pd", "cvtsd2si", "cvtsd2ss", "cvtsi2sd",
"cvtss2sd", "cvttpd2pi", "cvttpd2dq", "cvttps2dq", "cvttsd2si",
"divpd", "divsd", "lfence", "maskmovdqu", "maxpd", "maxsd", "mfence",
"minpd", "minsd", "movapd", "movd", "movdq2q", "movdqa", "movdqu",
"movhpd", "movlpd", "movmskpd", "movntdq", "movnti", "movntpd", "movq",
"movq2dq", "movsd", "movupd", "mulpd", "mulsd", "orpd", "packsswb",
"packssdw", "packuswb", "paddb", "paddw", "paddd", "paddq", "paddq",
"paddsb", "paddsw", "paddusb", "paddusw", "pand", "pandn", "pause",
"pavgb", "pavgw", "pcmpeqb", "pcmpeqw", "pcmpeqd", "pcmpgtb",
"pcmpgtw", "pcmpgtd", "pextrw", "pinsrw", "pmaddwd", "pmaxsw",
"pmaxub", "pminsw", "pminub", "pmovmskb", "pmulhw", "pmulhuw",
"pmullw", "pmuludq", "pmuludq", "por", "psadbw", "pshufd", "pshufhw",
"pshuflw", "pslldq", "psllw", "pslld", "psllq", "psraw", "psrad",
"psrldq", "psrlw", "psrld", "psrlq", "psubb", "psubw", "psubd",
"psubq", "psubq", "psubsb", "psubsw", "psubusb", "psubusw", "psubsb",
"punpckhbw", "punpckhwd", "punpckhdq", "punpckhqdq", "punpcklbw",
"punpcklwd", "punpckldq", "punpcklqdq", "pxor", "shufpd", "sqrtpd",
"sqrtsd", "subpd", "subsd", "ucomisd", "unpckhpd", "unpcklpd", "xorpd"]
SSE3_SET = [ "addsubpd", "addsubps", "haddpd", "haddps", "hsubpd", "hsubps",
"lddqu", "movddup", "movshdup", "movsldup", "fisttp"]
def get_vendor_string():
"""Return the vendor string reading cpuinfo."""
try:
a = open('/proc/cpuinfo').readlines()
b = re.compile('^vendor_id.*')
c = [i for i in a if b.match(i)]
except IOError:
raise ValueError("Could not read cpuinfo")
int = re.compile("GenuineIntel")
amd = re.compile("AuthenticAMD")
cyr = re.compile("CyrixInstead")
tra = re.compile("GenuineTMx86")
if int.search(c[0]):
return "intel"
elif amd.search(c[0]):
return "amd"
elif cyr.search(c[0]):
return "cyrix"
elif tra.search(c[0]):
return "tra"
else:
raise ValueError("Unknown vendor")
def disassemble(filename):
"""From a filename, returns a list of all asm instructions."""
cmd = "i586-mingw32msvc-objdump -d %s " % filename
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         universal_newlines=True)
    o = p.stdout
def floupi(line):
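        # objdump -d lines look like "addr:\t<hex bytes>\t<mnemonic> <operands>";
        # keep only the mnemonic column (other lines fall through as their
        # first whitespace-delimited token)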
line1 = line.split('\t')
if len(line1) > 2:
line2 = line1[2]
else:
line2 = line1[0]
line3 = line2.split(' ')
        inst = line3[0]
return inst
inst = [floupi(i) for i in o]
return inst
def has_set(seq, asm_set):
a = dict([(i, 0) for i in asm_set])
for i in asm_set:
a[i] = seq.count(i)
return a
def has_sse(seq):
return has_set(seq, SSE_SET)
def has_sse2(seq):
return has_set(seq, SSE2_SET)
def has_sse3(seq):
return has_set(seq, SSE3_SET)
def has_mmx(seq):
return has_set(seq, MMX_SET)
def has_ppro(seq):
return has_set(seq, PPRO_SET)
def cntset(seq):
cnt = 0
for i in seq.values():
cnt += i
return cnt
def main():
#parser = optparse.OptionParser()
#parser.add_option("-f", "--filename
args = sys.argv[1:]
filename = args[0]
analyse(filename)
def analyse(filename):
print(get_vendor_string())
print("Getting instructions...")
inst = disassemble(filename)
print("Counting instructions...")
sse = has_sse(inst)
sse2 = has_sse2(inst)
sse3 = has_sse3(inst)
#mmx = has_mmx(inst)
#ppro = has_ppro(inst)
#print sse
#print sse2
#print sse3
print("SSE3 inst %d" % cntset(sse3))
print("SSE2 inst %d" % cntset(sse2))
print("SSE inst %d" % cntset(sse))
print("Analysed %d instructions" % len(inst))
if __name__ == '__main__':
main()
#filename = "/usr/lib/sse2/libatlas.a"
##filename = "/usr/lib/sse2/libcblas.a"
|
biocore/tax2tree | refs/heads/master | t2t/consensus.py | 1 | #!/usr/bin/env python
"""TODO: place all consensus methods here
load_consensus_map
make_consensus_tree
etc...
"""
from t2t.nlevel import RANK_ORDER
from numpy import zeros, where, logical_or, long
def taxa_score(master, reps):
"""Score taxa strings by contradictions observed in reps"""
n_ranks = len(RANK_ORDER)
master_ids = frozenset(master.keys())
master_order = master.keys()
master_rows = {k: idx for idx, k in enumerate(master_order)}
master_cons = [master[k] for k in master_order]
scores = zeros((len(master_ids), n_ranks), dtype=float)
n_reps = 0
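    # each replicate casts one vote per (tip, rank); agreement with the
    # master consensus, or absence of the tip, counts as support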
for rep in reps:
n_reps += 1
for id_, con in rep.items():
if id_ not in master_ids:
raise KeyError("Unknown key %s in replicate" % id_)
row = master_rows[id_]
for rank, name in enumerate(con):
if name == master_cons[row][rank]:
scores[row, rank] += 1
# missing taxa are not considered contradictions
missing_taxa = master_ids - frozenset(rep)
for k in missing_taxa:
row = master_rows[k]
scores[row] += 1
scores /= n_reps
# slice and dice the scores
return {k: scores[master_rows[k]] for k in master}
def merge_taxa_strings_and_scores(master, scores):
"""Merge taxa strings and their scores, return {id_:(taxa,score)}"""
return {k: list(zip(v, scores[k])) for k, v in master.items()}
def taxa_score_hash(master, reps):
"""Score each taxonomy string based on contradictions observed in reps"""
n_ranks = len(RANK_ORDER)
master_order = master.keys()
scores = zeros((len(master_order), n_ranks), dtype=float)
master_hash = hash_cons(master, master_order, n_ranks)
n_reps = 0
for rep in reps:
n_reps += 1
rep_hash = hash_cons(rep, master_order, n_ranks)
# where the taxons are equal to the master
# or if the the taxon is not present in the replicate
scores += where(logical_or(rep_hash == master_hash, rep_hash == 0),
1, 0)
scores /= n_reps
# slice and dice the scores
return {k: scores[idx] for idx, k in enumerate(master_order)}
def hash_cons(cons, order, n_ranks):
"""Returns a numpy array of hash values for the cons
NOTE: expects that cons are always specified even if the taxon name does
not exist. In other words, the following are acceptable for missing fieids:
[None, 'None', k__, p__, etc...]. It is _NOT_ okay to use the empty string
at a field. The python hash method returns 0 on an empty string, but never
otherwise and this method treats 0 specially.
"""
hashes = zeros((len(order), n_ranks), dtype=long)
for idx, id_ in enumerate(order):
try:
hashes[idx] = [hash(c) for c in cons[id_]]
except KeyError:
pass
# defaults to zero if the consensus string isn't represented
return hashes
def get_consensus_stats(consensus_map):
"""Returns consensus stats, expects rank prefix
Returns a tuple of two dicts:
- sequence counts per level, (classified, unclassified)
- contains name counts per level
"""
n_seqs = {}
n_names = {}
cons = consensus_map.values()
total_cons = len(cons)
rank_names = None
for co in cons:
if co is None:
continue
rank_names = [c[0] for c in co if c is not None]
break
for idx, rank in enumerate(rank_names):
# collect all cons that are classified (ie more info than k__)
klassed = [c[idx].lower()
for c
in cons if c[idx] and c[idx][0] == rank]
n_classified = len(klassed)
rank_names = set(klassed)
n_seqs[rank] = (n_classified, total_cons - n_classified)
n_names[rank] = rank_names
return (n_seqs, n_names)
def pretty_print_consensus_stats(stats):
seqs, names = stats
print('\t'.join(['rank', 'num_classified', 'num_unclassified',
'num_names']))
for k in RANK_ORDER:
print('\t'.join(map(str, [k, seqs[k][0], seqs[k][1], len(names[k])])))
|
Haynie-Research-and-Development/jarvis | refs/heads/master | deps/lib/python3.4/site-packages/samsungctl/interactive.py | 2 | import curses
_mappings = [
["p", "KEY_POWEROFF", "P", "Power off"],
["KEY_UP", "KEY_UP", "Up", "Up"],
["KEY_DOWN", "KEY_DOWN", "Down", "Down"],
["KEY_LEFT", "KEY_LEFT", "Left", "Left"],
["KEY_RIGHT", "KEY_RIGHT", "Right", "Right"],
["KEY_PPAGE", "KEY_CHUP", "Page Up", "P Up"],
["KEY_NPAGE", "KEY_CHDOWN", "Page Down", "P Down"],
["\n", "KEY_ENTER", "Enter", "Enter"],
["KEY_BACKSPACE", "KEY_RETURN", "Backspace", "Return"],
["l", "KEY_CH_LIST", "L", "Channel List"],
["m", "KEY_MENU", "M", "Menu"],
["s", "KEY_SOURCE", "S", "Source"],
["g", "KEY_GUIDE", "G", "Guide"],
["t", "KEY_TOOLS", "T", "Tools"],
["i", "KEY_INFO", "I", "Info"],
["z", "KEY_RED", "Z", "A / Red"],
["x", "KEY_GREEN", "X", "B / Green"],
["c", "KEY_YELLOW", "C", "C / Yellow"],
["v", "KEY_BLUE", "V", "D / Blue"],
["d", "KEY_PANNEL_CHDOWN", "D", "3D"],
["+", "KEY_VOLUP", "+", "Volume Up"],
["-", "KEY_VOLDOWN", "-", "Volume Down"],
["*", "KEY_MUTE", "*", "Mute"],
["0", "KEY_0", "0", "0"],
["1", "KEY_1", "1", "1"],
["2", "KEY_2", "2", "2"],
["3", "KEY_3", "3", "3"],
["4", "KEY_4", "4", "4"],
["5", "KEY_5", "5", "5"],
["6", "KEY_6", "6", "6"],
["7", "KEY_7", "7", "7"],
["8", "KEY_8", "8", "8"],
["9", "KEY_9", "9", "9"],
["KEY_F(1)", "KEY_DTV", "F1", "TV Source"],
["KEY_F(2)", "KEY_HDMI", "F2", "HDMI Source"],
]
def run(remote):
"""Run interactive remote control application."""
curses.wrapper(_control, remote)
def _control(stdscr, remote):
height, width = stdscr.getmaxyx()
stdscr.addstr("Interactive mode, press 'Q' to exit.\n")
stdscr.addstr("Key mappings:\n")
column_len = max(len(mapping[2]) for mapping in _mappings) + 1
mappings_dict = {}
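    # single pass: build the curses-key -> TV-key lookup and print the legend,
    # eliding rows that no longer fit on screen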
for mapping in _mappings:
mappings_dict[mapping[0]] = mapping[1]
row = stdscr.getyx()[0] + 2
if row < height:
line = " {}= {} ({})\n".format(mapping[2].ljust(column_len),
mapping[3], mapping[1])
stdscr.addstr(line)
elif row == height:
stdscr.addstr("[Terminal is too small to show all keys]\n")
running = True
while running:
key = stdscr.getkey()
if key == "q":
running = False
if key in mappings_dict:
remote.control(mappings_dict[key])
try:
stdscr.addstr(".")
except curses.error:
stdscr.deleteln()
stdscr.move(stdscr.getyx()[0], 0)
stdscr.addstr(".")
|
TheTimmy/spack | refs/heads/develop | var/spack/repos/builtin/packages/libctl/package.py | 3 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libctl(AutotoolsPackage):
"""libctl is a free Guile-based library implementing flexible
control files for scientific simulations."""
homepage = "http://ab-initio.mit.edu/wiki/index.php/Libctl"
url = "http://ab-initio.mit.edu/libctl/libctl-3.2.2.tar.gz"
list_url = "http://ab-initio.mit.edu/libctl/old"
version('3.2.2', '5fd7634dc9ae8e7fa70a68473b9cbb68')
depends_on('guile')
def configure_args(self):
spec = self.spec
return [
'--enable-shared',
'GUILE={0}'.format(join_path(
spec['guile'].prefix.bin, 'guile')),
'GUILE_CONFIG={0}'.format(join_path(
spec['guile'].prefix.bin, 'guile-config')),
]
|
sve-odoo/odoo | refs/heads/master | addons/auth_oauth/res_users.py | 157 | import logging
import werkzeug.urls
import urlparse
import urllib2
import simplejson
import openerp
from openerp.addons.auth_signup.res_users import SignupError
from openerp.osv import osv, fields
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
class res_users(osv.Model):
_inherit = 'res.users'
_columns = {
'oauth_provider_id': fields.many2one('auth.oauth.provider', 'OAuth Provider'),
'oauth_uid': fields.char('OAuth User ID', help="Oauth Provider user_id", copy=False),
'oauth_access_token': fields.char('OAuth Access Token', readonly=True, copy=False),
}
_sql_constraints = [
('uniq_users_oauth_provider_oauth_uid', 'unique(oauth_provider_id, oauth_uid)', 'OAuth UID must be unique per provider'),
]
def _auth_oauth_rpc(self, cr, uid, endpoint, access_token, context=None):
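        # append the token to the endpoint, preserving any existing query string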
params = werkzeug.url_encode({'access_token': access_token})
if urlparse.urlparse(endpoint)[4]:
url = endpoint + '&' + params
else:
url = endpoint + '?' + params
f = urllib2.urlopen(url)
response = f.read()
return simplejson.loads(response)
def _auth_oauth_validate(self, cr, uid, provider, access_token, context=None):
""" return the validation data corresponding to the access token """
p = self.pool.get('auth.oauth.provider').browse(cr, uid, provider, context=context)
validation = self._auth_oauth_rpc(cr, uid, p.validation_endpoint, access_token)
if validation.get("error"):
raise Exception(validation['error'])
if p.data_endpoint:
data = self._auth_oauth_rpc(cr, uid, p.data_endpoint, access_token)
validation.update(data)
return validation
def _auth_oauth_signin(self, cr, uid, provider, validation, params, context=None):
""" retrieve and sign in the user corresponding to provider and validated access token
:param provider: oauth provider id (int)
:param validation: result of validation of access token (dict)
:param params: oauth parameters (dict)
:return: user login (str)
:raise: openerp.exceptions.AccessDenied if signin failed
This method can be overridden to add alternative signin methods.
"""
try:
oauth_uid = validation['user_id']
user_ids = self.search(cr, uid, [("oauth_uid", "=", oauth_uid), ('oauth_provider_id', '=', provider)])
if not user_ids:
raise openerp.exceptions.AccessDenied()
assert len(user_ids) == 1
user = self.browse(cr, uid, user_ids[0], context=context)
user.write({'oauth_access_token': params['access_token']})
return user.login
except openerp.exceptions.AccessDenied, access_denied_exception:
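            # unknown OAuth uid for this provider: fall back to signup,
            # unless user creation has been explicitly disabled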
if context and context.get('no_user_creation'):
return None
state = simplejson.loads(params['state'])
token = state.get('t')
oauth_uid = validation['user_id']
email = validation.get('email', 'provider_%s_user_%s' % (provider, oauth_uid))
name = validation.get('name', email)
values = {
'name': name,
'login': email,
'email': email,
'oauth_provider_id': provider,
'oauth_uid': oauth_uid,
'oauth_access_token': params['access_token'],
'active': True,
}
try:
_, login, _ = self.signup(cr, uid, values, token, context=context)
return login
except SignupError:
raise access_denied_exception
def auth_oauth(self, cr, uid, provider, params, context=None):
# Advice by Google (to avoid Confused Deputy Problem)
# if validation.audience != OUR_CLIENT_ID:
# abort()
# else:
# continue with the process
access_token = params.get('access_token')
validation = self._auth_oauth_validate(cr, uid, provider, access_token)
# required check
if not validation.get('user_id'):
raise openerp.exceptions.AccessDenied()
# retrieve and sign in user
login = self._auth_oauth_signin(cr, uid, provider, validation, params, context=context)
if not login:
raise openerp.exceptions.AccessDenied()
# return user credentials
return (cr.dbname, login, access_token)
def check_credentials(self, cr, uid, password):
try:
return super(res_users, self).check_credentials(cr, uid, password)
except openerp.exceptions.AccessDenied:
res = self.search(cr, SUPERUSER_ID, [('id', '=', uid), ('oauth_access_token', '=', password)])
if not res:
raise
#
|
bobcyw/django | refs/heads/master | tests/datatypes/tests.py | 305 | from __future__ import unicode_literals
import datetime
from django.test import TestCase, skipIfDBFeature
from django.utils import six
from django.utils.timezone import utc
from .models import Donut, RumBaba
class DataTypesTestCase(TestCase):
def test_boolean_type(self):
d = Donut(name='Apple Fritter')
self.assertFalse(d.is_frosted)
self.assertIsNone(d.has_sprinkles)
d.has_sprinkles = True
self.assertTrue(d.has_sprinkles)
d.save()
d2 = Donut.objects.get(name='Apple Fritter')
self.assertFalse(d2.is_frosted)
self.assertTrue(d2.has_sprinkles)
def test_date_type(self):
d = Donut(name='Apple Fritter')
d.baked_date = datetime.date(year=1938, month=6, day=4)
d.baked_time = datetime.time(hour=5, minute=30)
d.consumed_at = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
d.save()
d2 = Donut.objects.get(name='Apple Fritter')
self.assertEqual(d2.baked_date, datetime.date(1938, 6, 4))
self.assertEqual(d2.baked_time, datetime.time(5, 30))
self.assertEqual(d2.consumed_at, datetime.datetime(2007, 4, 20, 16, 19, 59))
def test_time_field(self):
# Test for ticket #12059: TimeField wrongly handling datetime.datetime object.
d = Donut(name='Apple Fritter')
d.baked_time = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
d.save()
d2 = Donut.objects.get(name='Apple Fritter')
self.assertEqual(d2.baked_time, datetime.time(16, 19, 59))
def test_year_boundaries(self):
"""Year boundary tests (ticket #3689)"""
Donut.objects.create(name='Date Test 2007',
baked_date=datetime.datetime(year=2007, month=12, day=31),
consumed_at=datetime.datetime(year=2007, month=12, day=31, hour=23, minute=59, second=59))
Donut.objects.create(name='Date Test 2006',
baked_date=datetime.datetime(year=2006, month=1, day=1),
consumed_at=datetime.datetime(year=2006, month=1, day=1))
self.assertEqual("Date Test 2007",
Donut.objects.filter(baked_date__year=2007)[0].name)
self.assertEqual("Date Test 2006",
Donut.objects.filter(baked_date__year=2006)[0].name)
Donut.objects.create(name='Apple Fritter',
consumed_at=datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59))
self.assertEqual(['Apple Fritter', 'Date Test 2007'],
list(Donut.objects.filter(consumed_at__year=2007).order_by('name').values_list('name', flat=True)))
self.assertEqual(0, Donut.objects.filter(consumed_at__year=2005).count())
self.assertEqual(0, Donut.objects.filter(consumed_at__year=2008).count())
def test_textfields_unicode(self):
"""Regression test for #10238: TextField values returned from the
database should be unicode."""
d = Donut.objects.create(name='Jelly Donut', review='Outstanding')
newd = Donut.objects.get(id=d.id)
self.assertIsInstance(newd.review, six.text_type)
@skipIfDBFeature('supports_timezones')
def test_error_on_timezone(self):
"""Regression test for #8354: the MySQL and Oracle backends should raise
an error if given a timezone-aware datetime object."""
dt = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=utc)
d = Donut(name='Bear claw', consumed_at=dt)
self.assertRaises(ValueError, d.save)
# ValueError: MySQL backend does not support timezone-aware datetimes.
def test_datefield_auto_now_add(self):
"""Regression test for #10970, auto_now_add for DateField should store
a Python datetime.date, not a datetime.datetime"""
b = RumBaba.objects.create()
# Verify we didn't break DateTimeField behavior
self.assertIsInstance(b.baked_timestamp, datetime.datetime)
# We need to test this way because datetime.datetime inherits
# from datetime.date:
self.assertIsInstance(b.baked_date, datetime.date)
self.assertNotIsInstance(b.baked_date, datetime.datetime)
|
wathen/PhD | refs/heads/master | MHD/FEniCS/BDMns/mixStokesPressureCorrection.py | 1 | #!/usr/bin/python
# from MatrixOperations import *
from dolfin import *
import numpy as np
import matplotlib.pylab as plt
import os
import scipy.io
from PyTrilinos import Epetra, EpetraExt, AztecOO, ML, Amesos
from scipy2Trilinos import scipy_csr_matrix2CrsMatrix
def SaveEpertaMatrix(A,name):
from PyTrilinos import EpetraExt
from numpy import array,loadtxt
import scipy.sparse as sps
import scipy.io
test ="".join([name,".txt"])
EpetraExt.RowMatrixToMatlabFile(test,A)
data = loadtxt(test)
col,row,values = data[:,0]-1,data[:,1]-1,data[:,2]
Asparse = sps.csr_matrix((values, (row, col)))
testmat ="".join([name,".mat"])
scipy.io.savemat( testmat, {name: Asparse},oned_as='row')
def NullSpace(A,name):
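    # round-trip the Epetra matrix through a text file into scipy and slice
    # off the last row/column (used here to pin the pressure constant mode)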
from PyTrilinos import EpetraExt, Epetra
from numpy import array,loadtxt
import scipy.sparse as sps
import scipy.io
import matplotlib.pylab as plt
test ="".join([name,".txt"])
EpetraExt.RowMatrixToMatlabFile(test,A)
data = loadtxt(test)
col,row,values = data[:,0]-1,data[:,1]-1,data[:,2]
Asparse = sps.csr_matrix((values, (row, col)))
(Nb,Mb) = Asparse.shape
Aublas1 = Asparse[0:Nb-1,0:Mb-1]
# plt.spy(Aublas1)
# if (Nb < 1000):
# plt.show()
comm = Epetra.PyComm()
Ap = scipy_csr_matrix2CrsMatrix(Aublas1, comm)
return Ap
#MO.SwapBackend('epetra')
#os.system("echo $PATH")
m =6
errL2u = np.zeros((m-1,1))
errL2p = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Vdim = np.zeros((m-1,1))
Qdim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
nn = 2
dim = 2
Solving = 'No'
Saving = 'no'
case = 1
# parameters['linear_algebra_backend'] = 'Epetra'
for xx in xrange(1,m):
print xx
nn = 2**xx
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn
# nn = 32
# mesh = UnitSquareMesh(16,16)
# mesh = UnitSquareMesh(nn, nn)
mesh = RectangleMesh(-1, -1, 1, 1, nn, nn,'right')
# tic()
parameters['reorder_dofs_serial'] = False
V = FunctionSpace(mesh, "BDM", 2)
Q = FunctionSpace(mesh, "DG", 1)
parameters['reorder_dofs_serial'] = False
# print 'time to create function spaces', toc(),'\n\n'
W = V*Q
Vdim[xx-1] = V.dim()
Qdim[xx-1] = Q.dim()
Wdim[xx-1] = W.dim()
print "\n\n\V: ",Vdim[xx-1],"Q: ",Qdim[xx-1],"W: ",Wdim[xx-1],"\n\n"
def boundary(x, on_boundary):
return on_boundary
# u0 = Expression(("sin(pi*x[1])","sin(pi*x[0])"))
# u0 = Expression(("pow(x[1],2)-1","pow(x[0],2)-1"))
if case == 1:
u0 = Expression(("20*x[0]*pow(x[1],3)","5*pow(x[0],4)-5*pow(x[1],4)"))
p0 = Expression("60*pow(x[0],2)*x[1]-20*pow(x[1],3)")
elif case == 2:
u0 = Expression(("pow(x[1],2)-x[1]","pow(x[0],2)-x[0]"))
p0 = Expression("x[1]+x[0]-1")
elif case == 3:
u0 = Expression(("cos(2*pi*x[1])*sin(2*pi*x[0]) ","-cos(2*pi*x[0])*sin(2*pi*x[1]) "))
p0 = Expression("sin(2*pi*x[0])*sin(2*pi*x[1]) ")
bc = DirichletBC(W.sub(0),u0, boundary)
bc1 = DirichletBC(W.sub(1), p0, boundary)
bcs = [bc]
# v, u = TestFunction(V), TrialFunction(V)
# q, p = TestFunction(Q), TrialFunction(Q)
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
# f = Expression(("-pi*pi*sin(pi*x[1])+2*x[1]","-pi*pi*sin(pi*x[0])"))
if case == 1:
f = -Expression(("0","0"))
elif case == 2:
f = -Expression(("-1","-1"))
elif case == 3:
f = -Expression(("8*pi*pi*cos(2*pi*x[1])*sin(2*pi*x[0]) + 2*pi*cos(2*pi*x[0])*sin(2*pi*x[1])","2*pi*cos(2*pi*x[0])*sin(2*pi*x[1]) - 8*pi*pi*cos(2*pi*x[0])*sin(2*pi*x[1])"))
u_k = Function(V)
mu = Constant(1e-0)
N = FacetNormal(mesh)
t = as_vector((-N[0], N[1]))
h = CellSize(mesh)
h_avg =avg(h)
alpha = 10.0
gamma =10.0
n = FacetNormal(mesh)
h = CellSize(mesh)
h_avg =avg(h)
d = 0
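    # a11: symmetric interior penalty (SIPG) form of the viscous term, with
    # penalty alpha/h_avg on interior facets and gamma/h for the weak
    # (Nitsche-type) Dirichlet boundary condition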
a11 = inner(grad(v), grad(u))*dx \
- inner(avg(grad(v)), outer(u('+'),N('+'))+outer(u('-'),N('-')))*dS \
- inner(outer(v('+'),N('+'))+outer(v('-'),N('-')), avg(grad(u)))*dS \
+ alpha/h_avg*inner(outer(v('+'),N('+'))+outer(v('-'),N('-')),outer(u('+'),N('+'))+outer(u('-'),N('-')))*dS \
- inner(outer(v,N), grad(u))*ds \
- inner(grad(v), outer(u,N))*ds \
+ gamma/h*inner(v,u)*ds
a12 = div(v)*p*dx
a21 = div(u)*q*dx
L1 = inner(v,f)*dx - gamma/h*inner(u0,v)*ds + inner(grad(v),outer(u0,N))*ds
a = -a11+a12+a21
i = p*q*dx
# AA = assemble(a11)
x = Function(W)
uu = Function(W)
# tic()
AA, bb = assemble_system(a, L1, bcs)
PP, btmp = assemble_system(i+a11, L1, bcs)
DoF = V.dim() + Q.dim()
x_epetra = Epetra.Vector(0*bb.array())
A_epetra = down_cast(AA).mat()
P_epetra = down_cast(PP).mat()
b_epetra = down_cast(bb).vec()
# x_epetra = down_cast(uu.vector()).vec()
A_epetra =NullSpace(A_epetra,"A_epetra")
P_epetra =NullSpace(P_epetra,"P_epetra")
bbb =bb.array()
Nb = bbb.shape
b =bbb[0:Nb[0]-1]
b_epetra = Epetra.Vector(b)
xxx = x.vector().array()
x =xxx[0:Nb[0]-1]
x_epetra = Epetra.Vector(x)
# mlList = {"max levels" : 200,
# "output" : 10,
# "smoother: type" : "symmetric Gauss-Seidel",
# "aggregation: type" : "Uncoupled"
# }
# prec = ML.MultiLevelPreconditioner(P_epetra, False)
# prec.SetParameterList(mlList)
# prec.ComputePreconditioner()
# solver = AztecOO.AztecOO(A_epetra, x_epetra, b_epetra)
# solver.SetPrecOperator(prec)
# solver.SetAztecOption(AztecOO.AZ_solver, AztecOO.AZ_gmres);
# solver.SetAztecOption(AztecOO.AZ_output, 100);
# err = solver.Iterate(20000, 1e-10)
tic()
problem = Epetra.LinearProblem(A_epetra,x_epetra,b_epetra)
print '\n\n\n\n\n\n'
factory = Amesos.Factory()
solver = factory.Create("Amesos_Umfpack", problem)
# solver = factory.Create("MUMPS", problem)
amesosList = {"PrintTiming" : True, "PrintStatus" : True }
solver.SetParameters(amesosList)
solver.SymbolicFactorization()
solver.NumericFactorization()
solver.Solve()
soln = problem.GetLHS()
print "||x_computed||_2 =", soln.Norm2()
# solver.PrintTiming()
print '\n\n\n\n\n\n'
if case == 1:
ue = Expression(("20*x[0]*pow(x[1],3)","5*pow(x[0],4)-5*pow(x[1],4)"))
pe = Expression("60*pow(x[0],2)*x[1]-20*pow(x[1],3)+5")
elif case == 2:
ue = Expression(("pow(x[1],2)-x[1]","pow(x[0],2)-x[0]"))
pe = Expression("x[1]+x[0]-1")
elif case == 3:
ue = Expression(("cos(2*pi*x[1])*sin(2*pi*x[0]) ","-cos(2*pi*x[0])*sin(2*pi*x[1]) "))
pe = Expression("sin(2*pi*x[0])*sin(2*pi*x[1]) ")
# pp = x_epetra[Vdim[xx-1][0]:]
# pa = Function(Q)
# pa1 = Function(Q)
# pa2 = Function(Q)
# pa1.vector()[:] = pp.array
# pa2.vector()[:] = 0*pp.array+1
# pa2.vector().array()
# pa.vector()[:] = pp.array + assemble(pa1*dx)/assemble(pa2*dx)
# uu = x_epetra[0:Vdim[xx-1][0]]
# ua = Function(V)
# ua.vector()[:] = uu.array
u = interpolate(ue,V)
p = interpolate(pe,Q)
Nv = u.vector().array().shape
x = x_epetra[0:Nv[0]]
ua = Function(V)
ua.vector()[:] = x.array
pp = x_epetra[Nv[0]:]
pp = pp.array
n = pp.shape
pp = np.insert(pp,n,0)
pa = Function(Q)
pa.vector()[:] = pp
pend = assemble(pa*dx)
ones = Function(Q)
ones.vector()[:]=(0*pp+1)
pp = Function(Q)
pp.vector()[:] = pa.vector().array() - assemble(pa*dx)/assemble(ones*dx)
pInterp = interpolate(pe,Q)
pe = Function(Q)
pe.vector()[:] = pInterp.vector().array()
const = - assemble(pe*dx)/assemble(ones*dx)
pe.vector()[:] = pe.vector()[:]+const
errL2u[xx-1] = errornorm(ue,ua,norm_type="L2", degree_rise=4,mesh=mesh)
errL2p[xx-1] = errornorm(pe,pp,norm_type="L2", degree_rise=4,mesh=mesh)
print "\n\n"
print errL2u[xx-1], errL2p[xx-1]
print "\n\n"
del solver
# scipy.io.savemat('Vdim.mat', {'VDoF':Vdim})
# scipy.io.savemat('DoF.mat', {'DoF':DoF})
# plt.loglog(NN,errL2u)
# plt.title('Error plot for CG2 elements - Velocity L2 convergence = %f' % np.log2(np.average((errL2u[0:m-2]/errL2u[1:m-1]))))
# plt.xlabel('N')
# plt.ylabel('L2 error')
# plt.figure()
# plt.loglog(NN,errL2p)
# plt.title('Error plot for CG1 elements - Pressure L2 convergence = %f' % np.log2(np.average((errL2p[0:m-2]/errL2p[1:m-1]))))
# plt.xlabel('N')
# plt.ylabel('L2 error')
# plt.show()
print "Velocity Elements rate of convergence ", np.log2(np.average((errL2u[0:m-2]/errL2u[1:m-1])))
print "Pressure Elements rate of convergence ", np.log2(np.average((errL2p[0:m-2]/errL2p[1:m-1])))
# plt.loglog(N,erru)
# plt.title('Error plot for P2 elements - convergence = %f' % np.log2(np.average((erru[0:m-2]/erru[1:m-1]))))
# plt.xlabel('N')
# plt.ylabel('L2 error')
# plt.figure()
# plt.loglog(N,errp)
# plt.title('Error plot for P1 elements - convergence = %f' % np.log2(np.average((errp[0:m-2]/errp[1:m-1]))))
# plt.xlabel('N')
# plt.ylabel('L2 error')
# plot(ua)
# plot(interpolate(ue,V))
plot(pp)
plot(interpolate(pe,Q))
interactive()
plt.show()
|
camradal/ansible | refs/heads/devel | test/units/module_utils/ec2/__init__.py | 12133432 | |
gugahoi/maraschino | refs/heads/master | lib/flask/testsuite/test_apps/blueprintapp/__init__.py | 629 | from flask import Flask
app = Flask(__name__)
from blueprintapp.apps.admin import admin
from blueprintapp.apps.frontend import frontend
app.register_blueprint(admin)
app.register_blueprint(frontend)
|
hroncok/freeipa | refs/heads/master | ipalib/plugins/dns.py | 1 | # Authors:
# Martin Kosek <[email protected]>
# Pavel Zuna <[email protected]>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import netaddr
import time
import re
import binascii
import dns.name
import dns.exception
import dns.rdatatype
import dns.resolver
import encodings.idna
from ipalib.request import context
from ipalib import api, errors, output
from ipalib import Command
from ipalib.capabilities import VERSION_WITHOUT_CAPABILITIES
from ipalib.parameters import (Flag, Bool, Int, Decimal, Str, StrEnum, Any,
DeprecatedParam, DNSNameParam)
from ipalib.plugable import Registry
from ipalib.plugins.baseldap import *
from ipalib import _, ngettext
from ipalib import messages
from ipalib.util import (normalize_zonemgr,
get_dns_forward_zone_update_policy,
get_dns_reverse_zone_update_policy,
get_reverse_zone_default, REVERSE_DNS_ZONES,
normalize_zone, validate_dnssec_global_forwarder,
DNSSECSignatureMissingError, UnresolvableRecordError,
EDNS0UnsupportedError, DNSSECValidationError,
validate_dnssec_zone_forwarder_step1,
validate_dnssec_zone_forwarder_step2)
from ipapython.ipautil import CheckedIPAddress, is_host_resolvable
from ipapython.dnsutil import DNSName
__doc__ = _("""
Domain Name System (DNS)
""") + _("""
Manage DNS zone and resource records.
""") + _("""
SUPPORTED ZONE TYPES
* Master zone (dnszone-*), contains authoritative data.
* Forward zone (dnsforwardzone-*), forwards queries to configured forwarders
(a set of DNS servers).
""") + _("""
USING STRUCTURED PER-TYPE OPTIONS
""") + _("""
There are many structured DNS RR types where DNS data stored in LDAP server
is not just a scalar value, for example an IP address or a domain name, but
a data structure which may be often complex. A good example is a LOC record
[RFC1876] which consists of many mandatory and optional parts (degrees,
minutes, seconds of latitude and longitude, altitude or precision).
""") + _("""
It may be difficult to manipulate such DNS records without making a mistake
and entering an invalid value. DNS module provides an abstraction over these
raw records and allows to manipulate each RR type with specific options. For
each supported RR type, DNS module provides a standard option to manipulate
a raw records with format --<rrtype>-rec, e.g. --mx-rec, and special options
for every part of the RR structure with format --<rrtype>-<partname>, e.g.
--mx-preference and --mx-exchanger.
""") + _("""
When adding a record, either RR specific options or standard option for a raw
value can be used, they just should not be combined in one add operation. When
modifying an existing entry, new RR specific options can be used to change
one part of a DNS record, where the standard option for raw value is used
to specify the modified value. The following example demonstrates
a modification of MX record preference from 0 to 1 in a record without
modifying the exchanger:
ipa dnsrecord-mod --mx-rec="0 mx.example.com." --mx-preference=1
""") + _("""
EXAMPLES:
""") + _("""
Add new zone:
ipa dnszone-add example.com [email protected]
""") + _("""
Add system permission that can be used for per-zone privilege delegation:
ipa dnszone-add-permission example.com
""") + _("""
Modify the zone to allow dynamic updates for hosts own records in realm EXAMPLE.COM:
ipa dnszone-mod example.com --dynamic-update=TRUE
""") + _("""
This is the equivalent of:
ipa dnszone-mod example.com --dynamic-update=TRUE \\
--update-policy="grant EXAMPLE.COM krb5-self * A; grant EXAMPLE.COM krb5-self * AAAA; grant EXAMPLE.COM krb5-self * SSHFP;"
""") + _("""
Modify the zone to allow zone transfers for local network only:
ipa dnszone-mod example.com --allow-transfer=192.0.2.0/24
""") + _("""
Add new reverse zone specified by network IP address:
ipa dnszone-add --name-from-ip=192.0.2.0/24
""") + _("""
Add second nameserver for example.com:
ipa dnsrecord-add example.com @ --ns-rec=nameserver2.example.com
""") + _("""
Add a mail server for example.com:
ipa dnsrecord-add example.com @ --mx-rec="10 mail1"
""") + _("""
Add another record using MX record specific options:
ipa dnsrecord-add example.com @ --mx-preference=20 --mx-exchanger=mail2
""") + _("""
Add another record using interactive mode (started when dnsrecord-add, dnsrecord-mod,
or dnsrecord-del are executed with no options):
ipa dnsrecord-add example.com @
Please choose a type of DNS resource record to be added
The most common types for this type of zone are: NS, MX, LOC
DNS resource record type: MX
MX Preference: 30
MX Exchanger: mail3
Record name: example.com
MX record: 10 mail1, 20 mail2, 30 mail3
NS record: nameserver.example.com., nameserver2.example.com.
""") + _("""
Delete previously added nameserver from example.com:
ipa dnsrecord-del example.com @ --ns-rec=nameserver2.example.com.
""") + _("""
Add LOC record for example.com:
ipa dnsrecord-add example.com @ --loc-rec="49 11 42.4 N 16 36 29.6 E 227.64m"
""") + _("""
Add new A record for www.example.com. Create a reverse record in appropriate
reverse zone as well. In this case a PTR record "2" pointing to www.example.com
will be created in zone 2.0.192.in-addr.arpa.
ipa dnsrecord-add example.com www --a-rec=192.0.2.2 --a-create-reverse
""") + _("""
Add new PTR record for www.example.com
ipa dnsrecord-add 2.0.192.in-addr.arpa. 2 --ptr-rec=www.example.com.
""") + _("""
Add new SRV records for LDAP servers. Three quarters of the requests
should go to fast.example.com, one quarter to slow.example.com. If neither
is available, switch to backup.example.com.
ipa dnsrecord-add example.com _ldap._tcp --srv-rec="0 3 389 fast.example.com"
ipa dnsrecord-add example.com _ldap._tcp --srv-rec="0 1 389 slow.example.com"
ipa dnsrecord-add example.com _ldap._tcp --srv-rec="1 1 389 backup.example.com"
""") + _("""
The interactive mode can be used for easy modification:
ipa dnsrecord-mod example.com _ldap._tcp
No option to modify specific record provided.
Current DNS record contents:
SRV record: 0 3 389 fast.example.com, 0 1 389 slow.example.com, 1 1 389 backup.example.com
Modify SRV record '0 3 389 fast.example.com'? Yes/No (default No):
Modify SRV record '0 1 389 slow.example.com'? Yes/No (default No): y
SRV Priority [0]: (keep the default value)
SRV Weight [1]: 2 (modified value)
SRV Port [389]: (keep the default value)
SRV Target [slow.example.com]: (keep the default value)
1 SRV record skipped. Only one value per DNS record type can be modified at one time.
Record name: _ldap._tcp
SRV record: 0 3 389 fast.example.com, 1 1 389 backup.example.com, 0 2 389 slow.example.com
""") + _("""
After this modification, three fifths of the requests should go to
fast.example.com and two fifths to slow.example.com.
""") + _("""
An example of the interactive mode for dnsrecord-del command:
ipa dnsrecord-del example.com www
No option to delete specific record provided.
Delete all? Yes/No (default No): (do not delete all records)
Current DNS record contents:
A record: 192.0.2.2, 192.0.2.3
Delete A record '192.0.2.2'? Yes/No (default No):
Delete A record '192.0.2.3'? Yes/No (default No): y
Record name: www
A record: 192.0.2.2 (A record 192.0.2.3 has been deleted)
""") + _("""
Show zone example.com:
ipa dnszone-show example.com
""") + _("""
Find zone with "example" in its domain name:
ipa dnszone-find example
""") + _("""
Find records for resources with "www" in their name in zone example.com:
ipa dnsrecord-find example.com www
""") + _("""
Find A records with value 192.0.2.2 in zone example.com
ipa dnsrecord-find example.com --a-rec=192.0.2.2
""") + _("""
Show records for resource www in zone example.com
ipa dnsrecord-show example.com www
""") + _("""
Delegate zone sub.example to another nameserver:
ipa dnsrecord-add example.com ns.sub --a-rec=203.0.113.1
ipa dnsrecord-add example.com sub --ns-rec=ns.sub.example.com.
""") + _("""
Delete zone example.com with all resource records:
ipa dnszone-del example.com
""") + _("""
If a global forwarder is configured, all queries for which this server is not
authoritative (e.g. sub.example.com) will be routed to the global forwarder.
Global forwarding configuration can be overridden per-zone.
""") + _("""
Semantics of forwarding in IPA matches BIND semantics and depends on the type
of zone:
* Master zone: local BIND replies authoritatively to queries for data in
the given zone (including authoritative NXDOMAIN answers) and forwarding
affects only queries for names below zone cuts (NS records) of locally
served zones.
* Forward zone: forward zone contains no authoritative data. BIND forwards
queries, which cannot be answered from its local cache, to configured
forwarders.
""") + _("""
Semantics of the --forwarder-policy option:
* none - disable forwarding for the given zone.
* first - forward all queries to configured forwarders. If they fail,
do resolution using DNS root servers.
* only - forward all queries to configured forwarders and if they fail,
return failure.
""") + _("""
Disable global forwarding for given sub-tree:
ipa dnszone-mod example.com --forward-policy=none
""") + _("""
This configuration forwards all queries for names outside the example.com
sub-tree to global forwarders. Normal recursive resolution process is used
for names inside the example.com sub-tree (i.e. NS records are followed etc.).
""") + _("""
Forward all requests for the zone external.example.com to another forwarder
using a "first" policy (it will send the queries to the selected forwarder
and if not answered it will use global root servers):
ipa dnsforwardzone-add external.example.com --forward-policy=first \\
--forwarder=203.0.113.1
""") + _("""
Change forward-policy for external.example.com:
ipa dnsforwardzone-mod external.example.com --forward-policy=only
""") + _("""
Show forward zone external.example.com:
ipa dnsforwardzone-show external.example.com
""") + _("""
List all forward zones:
ipa dnsforwardzone-find
""") + _("""
Delete forward zone external.example.com:
ipa dnsforwardzone-del external.example.com
""") + _("""
Resolve a host name to see if it exists (will add default IPA domain
if one is not included):
ipa dns-resolve www.example.com
ipa dns-resolve www
""") + _("""
GLOBAL DNS CONFIGURATION
""") + _("""
DNS configuration passed to command line install script is stored in a local
configuration file on each IPA server where DNS service is configured. These
local settings can be overridden with a common configuration stored in LDAP
server:
""") + _("""
Show global DNS configuration:
ipa dnsconfig-show
""") + _("""
Modify global DNS configuration and set a list of global forwarders:
ipa dnsconfig-mod --forwarder=203.0.113.113
""")
register = Registry()
# supported resource record types
_record_types = (
u'A', u'AAAA', u'A6', u'AFSDB', u'APL', u'CERT', u'CNAME', u'DHCID', u'DLV',
u'DNAME', u'DS', u'HIP', u'HINFO', u'IPSECKEY', u'KEY', u'KX', u'LOC',
u'MD', u'MINFO', u'MX', u'NAPTR', u'NS', u'NSEC', u'NXT', u'PTR', u'RRSIG',
u'RP', u'SIG', u'SPF', u'SRV', u'SSHFP', u'TLSA', u'TXT',
)
# DNS zone record identifier
_dns_zone_record = DNSName.empty
# most used record types, always ask for those in interactive prompt
_top_record_types = ('A', 'AAAA', )
_rev_top_record_types = ('PTR', )
_zone_top_record_types = ('NS', 'MX', 'LOC', )
# attributes derived from record types
_record_attributes = [str('%srecord' % t.lower()) for t in _record_types]
# Deprecated
# supported DNS classes, IN = internet, rest is almost never used
_record_classes = (u'IN', u'CS', u'CH', u'HS')
# IN record class
_IN = dns.rdataclass.IN
# NS record type
_NS = dns.rdatatype.from_text('NS')
_output_permissions = (
output.summary,
output.Output('result', bool, _('True means the operation was successful')),
output.Output('value', unicode, _('Permission value')),
)
def _rname_validator(ugettext, zonemgr):
try:
DNSName(zonemgr) # test only if it is valid domain name
except (ValueError, dns.exception.SyntaxError) as e:
return unicode(e)
return None
def _create_zone_serial():
"""
    Generate serial number for zones. bind-dyndb-ldap expects unix time
    to be used for the SOA serial.
    SOA serial in a date format would also work, but it may be pushed far
    into the future when many DNS updates are done per day (more than 100).
    A unix timestamp is more resilient to this issue.
"""
return int(time.time())
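# Illustrative values (assuming a call made on 2014-07-02 12:00:00 UTC):
#   _create_zone_serial()  ->  1404302400
# A date-style serial such as 2014070201 could wrap within a single day
# under heavy update load; the unix timestamp cannot.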
def _reverse_zone_name(netstr):
try:
netaddr.IPAddress(str(netstr))
except (netaddr.AddrFormatError, ValueError):
pass
else:
# use more sensible default prefix than netaddr default
return unicode(get_reverse_zone_default(netstr))
net = netaddr.IPNetwork(netstr)
items = net.ip.reverse_dns.split('.')
if net.version == 4:
return u'.'.join(items[4 - net.prefixlen / 8:])
elif net.version == 6:
return u'.'.join(items[32 - net.prefixlen / 4:])
else:
return None
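# Example inputs and the zone names they map to (illustration only):
#   _reverse_zone_name(u'192.0.2.0/24')   ->  u'2.0.192.in-addr.arpa.'
#   _reverse_zone_name(u'2001:db8::/32')  ->  u'8.b.d.0.1.0.0.2.ip6.arpa.'
# A plain IP address (no prefix) is delegated to get_reverse_zone_default().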
def _validate_ipaddr(ugettext, ipaddr, ip_version=None):
try:
ip = netaddr.IPAddress(str(ipaddr), flags=netaddr.INET_PTON)
if ip_version is not None:
if ip.version != ip_version:
return _('invalid IP address version (is %(value)d, must be %(required_value)d)!') \
% dict(value=ip.version, required_value=ip_version)
except (netaddr.AddrFormatError, ValueError):
return _('invalid IP address format')
return None
def _validate_ip4addr(ugettext, ipaddr):
return _validate_ipaddr(ugettext, ipaddr, 4)
def _validate_ip6addr(ugettext, ipaddr):
return _validate_ipaddr(ugettext, ipaddr, 6)
def _validate_ipnet(ugettext, ipnet):
try:
net = netaddr.IPNetwork(ipnet)
except (netaddr.AddrFormatError, ValueError, UnboundLocalError):
return _('invalid IP network format')
return None
def _validate_bind_aci(ugettext, bind_acis):
if not bind_acis:
return
bind_acis = bind_acis.split(';')
if bind_acis[-1]:
return _('each ACL element must be terminated with a semicolon')
else:
bind_acis.pop(-1)
for bind_aci in bind_acis:
if bind_aci in ("any", "none", "localhost", "localnets"):
continue
if bind_aci.startswith('!'):
bind_aci = bind_aci[1:]
try:
ip = CheckedIPAddress(bind_aci, parse_netmask=True,
allow_network=True, allow_loopback=True)
except (netaddr.AddrFormatError, ValueError), e:
return unicode(e)
except UnboundLocalError:
return _(u"invalid address format")
def _normalize_bind_aci(bind_acis):
if not bind_acis:
return
bind_acis = bind_acis.split(';')
normalized = []
for bind_aci in bind_acis:
if not bind_aci:
continue
if bind_aci in ("any", "none", "localhost", "localnets"):
normalized.append(bind_aci)
continue
prefix = ""
if bind_aci.startswith('!'):
bind_aci = bind_aci[1:]
prefix = "!"
try:
ip = CheckedIPAddress(bind_aci, parse_netmask=True,
allow_network=True, allow_loopback=True)
if '/' in bind_aci: # addr with netmask
netmask = "/%s" % ip.prefixlen
else:
netmask = ""
normalized.append(u"%s%s%s" % (prefix, str(ip), netmask))
continue
        except Exception:
normalized.append(bind_aci)
continue
acis = u';'.join(normalized)
acis += u';'
return acis
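# A sketch of the normalization (illustrative input, assuming
# CheckedIPAddress accepts the CIDR form):
#   _normalize_bind_aci(u'any;;192.0.2.0/24')  ->  u'any;192.0.2.0/24;'
# Empty elements are dropped, networks are echoed with their prefix length,
# and a trailing semicolon is always appended.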
def _validate_bind_forwarder(ugettext, forwarder):
ip_address, sep, port = forwarder.partition(u' port ')
ip_address_validation = _validate_ipaddr(ugettext, ip_address)
if ip_address_validation is not None:
return ip_address_validation
if sep:
try:
port = int(port)
if port < 0 or port > 65535:
raise ValueError()
except ValueError:
            return _('%(port)s is not a valid port') % dict(port=port)
return None
def _validate_nsec3param_record(ugettext, value):
_nsec3param_pattern = (r'^(?P<alg>\d+) (?P<flags>\d+) (?P<iter>\d+) '
r'(?P<salt>([0-9a-fA-F]{2})+|-)$')
rec = re.compile(_nsec3param_pattern, flags=re.U)
result = rec.match(value)
if result is None:
return _(u'expected format: <0-255> <0-255> <0-65535> '
'even-length_hexadecimal_digits_or_hyphen')
alg = int(result.group('alg'))
flags = int(result.group('flags'))
iterations = int(result.group('iter'))
salt = result.group('salt')
if alg > 255:
return _('algorithm value: allowed interval 0-255')
if flags > 255:
return _('flags value: allowed interval 0-255')
if iterations > 65535:
return _('iterations value: allowed interval 0-65535')
if salt == u'-':
return None
try:
binascii.a2b_hex(salt)
except TypeError, e:
return _('salt value: %(err)s') % {'err': e}
return None
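# Expected behaviour for a few sample values (illustration only):
#   _validate_nsec3param_record(None, u'1 0 10 AB')    ->  None (valid)
#   _validate_nsec3param_record(None, u'1 0 10 -')     ->  None (empty salt)
#   _validate_nsec3param_record(None, u'1 0 70000 AB') ->  iterations error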
def _hostname_validator(ugettext, value):
assert isinstance(value, DNSName)
if len(value.make_absolute().labels) < 3:
return _('invalid domain-name: not fully qualified')
return None
def _no_wildcard_validator(ugettext, value):
"""Disallow usage of wildcards as RFC 4592 section 4 recommends
"""
assert isinstance(value, DNSName)
if value.is_wild():
return _('should not be a wildcard domain name (RFC 4592 section 4)')
return None
def is_forward_record(zone, str_address):
addr = netaddr.IPAddress(str_address)
if addr.version == 4:
result = api.Command['dnsrecord_find'](zone, arecord=str_address)
elif addr.version == 6:
result = api.Command['dnsrecord_find'](zone, aaaarecord=str_address)
else:
raise ValueError('Invalid address family')
return result['count'] > 0
def add_forward_record(zone, name, str_address):
addr = netaddr.IPAddress(str_address)
try:
if addr.version == 4:
api.Command['dnsrecord_add'](zone, name, arecord=str_address)
elif addr.version == 6:
api.Command['dnsrecord_add'](zone, name, aaaarecord=str_address)
else:
raise ValueError('Invalid address family')
except errors.EmptyModlist:
pass # the entry already exists and matches
def get_reverse_zone(ipaddr, prefixlen=None):
ip = netaddr.IPAddress(str(ipaddr))
revdns = DNSName(unicode(ip.reverse_dns))
if prefixlen is None:
revzone = None
result = api.Command['dnszone_find']()['result']
for zone in result:
zonename = zone['idnsname'][0]
if (revdns.is_subdomain(zonename.make_absolute()) and
(revzone is None or zonename.is_subdomain(revzone))):
revzone = zonename
else:
if ip.version == 4:
pos = 4 - prefixlen / 8
elif ip.version == 6:
pos = 32 - prefixlen / 4
items = ip.reverse_dns.split('.')
revzone = DNSName(items[pos:])
try:
api.Command['dnszone_show'](revzone)
except errors.NotFound:
revzone = None
if revzone is None:
raise errors.NotFound(
reason=_('DNS reverse zone for IP address %(addr)s not found') % dict(addr=ipaddr)
)
revname = revdns.relativize(revzone)
return revzone, revname
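# For instance, with zone 2.0.192.in-addr.arpa. present (see the module
# docstring above), get_reverse_zone(u'192.0.2.2') would return
# (DNSName(u'2.0.192.in-addr.arpa.'), DNSName(u'2')).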
def add_records_for_host_validation(option_name, host, domain, ip_addresses, check_forward=True, check_reverse=True):
assert isinstance(host, DNSName)
assert isinstance(domain, DNSName)
try:
api.Command['dnszone_show'](domain)['result']
except errors.NotFound:
raise errors.NotFound(
reason=_('DNS zone %(zone)s not found') % dict(zone=domain)
)
if not isinstance(ip_addresses, (tuple, list)):
ip_addresses = [ip_addresses]
for ip_address in ip_addresses:
try:
ip = CheckedIPAddress(ip_address, match_local=False)
except Exception, e:
raise errors.ValidationError(name=option_name, error=unicode(e))
if check_forward:
if is_forward_record(domain, unicode(ip)):
raise errors.DuplicateEntry(
message=_(u'IP address %(ip)s is already assigned in domain %(domain)s.')\
% dict(ip=str(ip), domain=domain))
if check_reverse:
try:
prefixlen = None
if not ip.defaultnet:
prefixlen = ip.prefixlen
# we prefer lookup of the IP through the reverse zone
revzone, revname = get_reverse_zone(ip, prefixlen)
reverse = api.Command['dnsrecord_find'](revzone, idnsname=revname)
if reverse['count'] > 0:
raise errors.DuplicateEntry(
message=_(u'Reverse record for IP address %(ip)s already exists in reverse zone %(zone)s.')\
% dict(ip=str(ip), zone=revzone))
except errors.NotFound:
pass
def add_records_for_host(host, domain, ip_addresses, add_forward=True, add_reverse=True):
assert isinstance(host, DNSName)
assert isinstance(domain, DNSName)
if not isinstance(ip_addresses, (tuple, list)):
ip_addresses = [ip_addresses]
for ip_address in ip_addresses:
ip = CheckedIPAddress(ip_address, match_local=False)
if add_forward:
add_forward_record(domain, host, unicode(ip))
if add_reverse:
try:
prefixlen = None
if not ip.defaultnet:
prefixlen = ip.prefixlen
revzone, revname = get_reverse_zone(ip, prefixlen)
addkw = {'ptrrecord': host.derelativize(domain).ToASCII()}
api.Command['dnsrecord_add'](revzone, revname, **addkw)
except errors.EmptyModlist:
# the entry already exists and matches
pass
def _dns_name_to_string(value, raw=False):
if isinstance(value, unicode):
try:
value = DNSName(value)
except Exception:
return value
assert isinstance(value, DNSName)
if raw:
return value.ToASCII()
else:
return unicode(value)
def _check_entry_objectclass(entry, objectclasses):
"""
Check if entry contains all objectclasses
"""
if not isinstance(objectclasses, (list, tuple)):
objectclasses = [objectclasses, ]
if not entry.get('objectclass'):
return False
entry_objectclasses = [o.lower() for o in entry['objectclass']]
for o in objectclasses:
if o not in entry_objectclasses:
return False
return True
def _check_DN_objectclass(ldap, dn, objectclasses):
try:
entry = ldap.get_entry(dn, [u'objectclass', ])
except Exception:
return False
else:
return _check_entry_objectclass(entry, objectclasses)
class DNSRecord(Str):
# a list of parts that create the actual raw DNS record
parts = None
# an optional list of parameters used in record-specific operations
extra = None
supported = True
# supported RR types: https://fedorahosted.org/bind-dyndb-ldap/browser/doc/schema
label_format = _("%s record")
part_label_format = "%s %s"
doc_format = _('Raw %s records')
option_group_format = _('%s Record')
see_rfc_msg = _("(see RFC %s for details)")
part_name_format = "%s_part_%s"
extra_name_format = "%s_extra_%s"
cli_name_format = "%s_%s"
format_error_msg = None
kwargs = Str.kwargs + (
('validatedns', bool, True),
('normalizedns', bool, True),
)
# should be replaced in subclasses
rrtype = None
rfc = None
def __init__(self, name=None, *rules, **kw):
if self.rrtype not in _record_types:
raise ValueError("Unknown RR type: %s. Must be one of %s" % \
(str(self.rrtype), ", ".join(_record_types)))
if not name:
name = "%srecord*" % self.rrtype.lower()
kw.setdefault('cli_name', '%s_rec' % self.rrtype.lower())
kw.setdefault('label', self.label_format % self.rrtype)
kw.setdefault('doc', self.doc_format % self.rrtype)
kw.setdefault('option_group', self.option_group_format % self.rrtype)
kw['csv'] = True
if not self.supported:
kw['flags'] = ('no_option',)
super(DNSRecord, self).__init__(name, *rules, **kw)
def _get_part_values(self, value):
values = value.split()
if len(values) != len(self.parts):
return None
return tuple(values)
def _part_values_to_string(self, values, index, idna=True):
self._validate_parts(values)
parts = []
for v in values:
if v is None:
continue
elif isinstance(v, DNSName) and idna:
v = v.ToASCII()
elif not isinstance(v, unicode):
v = unicode(v)
parts.append(v)
return u" ".join(parts)
def get_parts_from_kw(self, kw, raise_on_none=True):
part_names = tuple(self.part_name_format % (self.rrtype.lower(), part.name) \
for part in self.parts)
vals = tuple(kw.get(part_name) for part_name in part_names)
if all(val is None for val in vals):
return
if raise_on_none:
for val_id,val in enumerate(vals):
if val is None and self.parts[val_id].required:
cli_name = self.cli_name_format % (self.rrtype.lower(), self.parts[val_id].name)
raise errors.ConversionError(name=self.name,
error=_("'%s' is a required part of DNS record") % cli_name)
return vals
def _validate_parts(self, parts):
if len(parts) != len(self.parts):
raise errors.ValidationError(name=self.name,
error=_("Invalid number of parts!"))
def _convert_scalar(self, value, index=None):
if isinstance(value, (tuple, list)):
return self._part_values_to_string(value, index)
return super(DNSRecord, self)._convert_scalar(value, index)
def normalize(self, value):
if self.normalizedns:
if isinstance(value, (tuple, list)):
value = tuple(
self._normalize_parts(v) for v in value \
if v is not None
)
elif value is not None:
value = (self._normalize_parts(value),)
return super(DNSRecord, self).normalize(value)
def _normalize_parts(self, value):
"""
Normalize a DNS record value using normalizers for its parts.
"""
if self.parts is None:
return value
try:
values = self._get_part_values(value)
if not values:
return value
converted_values = [ part._convert_scalar(values[part_id]) \
if values[part_id] is not None else None
for part_id, part in enumerate(self.parts)
]
new_values = [ part.normalize(converted_values[part_id]) \
for part_id, part in enumerate(self.parts) ]
value = self._convert_scalar(new_values)
except Exception:
# cannot normalize, rather return original value than fail
pass
return value
def _rule_validatedns(self, _, value):
if not self.validatedns:
return
if value is None:
return
if not self.supported:
return _('DNS RR type "%s" is not supported by bind-dyndb-ldap plugin') \
% self.rrtype
if self.parts is None:
return
# validate record format
values = self._get_part_values(value)
if not values:
if not self.format_error_msg:
part_names = [part.name.upper() for part in self.parts]
if self.rfc:
see_rfc_msg = " " + self.see_rfc_msg % self.rfc
else:
see_rfc_msg = ""
return _('format must be specified as "%(format)s" %(rfcs)s') \
% dict(format=" ".join(part_names), rfcs=see_rfc_msg)
else:
return self.format_error_msg
# validate every part
for part_id, part in enumerate(self.parts):
val = part.normalize(values[part_id])
val = part.convert(val)
part.validate(val)
return None
def _convert_dnsrecord_part(self, part):
"""
All parts of DNSRecord need to be processed and modified before they
can be added to global DNS API. For example a prefix need to be added
before part name so that the name is unique in the global namespace.
"""
name = self.part_name_format % (self.rrtype.lower(), part.name)
cli_name = self.cli_name_format % (self.rrtype.lower(), part.name)
label = self.part_label_format % (self.rrtype, unicode(part.label))
option_group = self.option_group_format % self.rrtype
flags = list(part.flags) + ['dnsrecord_part', 'virtual_attribute',]
if not part.required:
flags.append('dnsrecord_optional')
if not self.supported:
flags.append("no_option")
return part.clone_rename(name,
cli_name=cli_name,
label=label,
required=False,
option_group=option_group,
flags=flags,
hint=self.name,) # name of parent RR param
def _convert_dnsrecord_extra(self, extra):
"""
Parameters for special per-type behavior need to be processed in the
same way as record parts in _convert_dnsrecord_part().
"""
name = self.extra_name_format % (self.rrtype.lower(), extra.name)
cli_name = self.cli_name_format % (self.rrtype.lower(), extra.name)
label = self.part_label_format % (self.rrtype, unicode(extra.label))
option_group = self.option_group_format % self.rrtype
flags = list(extra.flags) + ['dnsrecord_extra', 'virtual_attribute',]
return extra.clone_rename(name,
cli_name=cli_name,
label=label,
required=False,
option_group=option_group,
flags=flags,
hint=self.name,) # name of parent RR param
def get_parts(self):
if self.parts is None:
return tuple()
return tuple(self._convert_dnsrecord_part(part) for part in self.parts)
def get_extra(self):
if self.extra is None:
return tuple()
return tuple(self._convert_dnsrecord_extra(extra) for extra in self.extra)
def __get_part_param(self, cmd, part, output_kw, default=None):
name = self.part_name_format % (self.rrtype.lower(), part.name)
label = self.part_label_format % (self.rrtype, unicode(part.label))
optional = not part.required
output_kw[name] = cmd.prompt_param(part,
optional=optional,
label=label)
def prompt_parts(self, cmd, mod_dnsvalue=None):
mod_parts = None
if mod_dnsvalue is not None:
mod_parts = self._get_part_values(mod_dnsvalue)
user_options = {}
if self.parts is None:
return user_options
for part_id, part in enumerate(self.parts):
if mod_parts:
default = mod_parts[part_id]
else:
default = None
self.__get_part_param(cmd, part, user_options, default)
return user_options
def prompt_missing_parts(self, cmd, kw, prompt_optional=False):
user_options = {}
if self.parts is None:
return user_options
for part in self.parts:
name = self.part_name_format % (self.rrtype.lower(), part.name)
if name in kw:
continue
optional = not part.required
if optional and not prompt_optional:
continue
default = part.get_default(**kw)
self.__get_part_param(cmd, part, user_options, default)
return user_options
# callbacks for per-type special record behavior
def dnsrecord_add_pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
assert isinstance(dn, DN)
def dnsrecord_add_post_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
class ForwardRecord(DNSRecord):
extra = (
Flag('create_reverse?',
label=_('Create reverse'),
doc=_('Create reverse record for this IP Address'),
flags=['no_update']
),
)
def dnsrecord_add_pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
assert isinstance(dn, DN)
reverse_option = self._convert_dnsrecord_extra(self.extra[0])
if options.get(reverse_option.name):
records = entry_attrs.get(self.name, [])
if not records:
                # --<rrtype>-create-reverse is set, but there are no records
raise errors.RequirementError(name=self.name)
for record in records:
add_records_for_host_validation(self.name, keys[-1], keys[-2], record,
check_forward=False,
check_reverse=True)
setattr(context, '%s_reverse' % self.name, entry_attrs.get(self.name))
def dnsrecord_add_post_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
rev_records = getattr(context, '%s_reverse' % self.name, [])
if rev_records:
            # make sure we don't run this post callback action again in nested
            # commands, like adding a PTR record in add_records_for_host
delattr(context, '%s_reverse' % self.name)
for record in rev_records:
try:
add_records_for_host(keys[-1], keys[-2], record,
add_forward=False, add_reverse=True)
except Exception, e:
raise errors.NonFatalError(
reason=_('Cannot create reverse record for "%(value)s": %(exc)s') \
% dict(value=record, exc=unicode(e)))
class UnsupportedDNSRecord(DNSRecord):
"""
Records which are not supported by IPA CLI, but we allow to show them if
LDAP contains these records.
"""
supported = False
def _get_part_values(self, value):
return tuple()
class ARecord(ForwardRecord):
rrtype = 'A'
rfc = 1035
parts = (
Str('ip_address',
_validate_ip4addr,
label=_('IP Address'),
),
)
class A6Record(DNSRecord):
rrtype = 'A6'
rfc = 3226
parts = (
Str('data',
label=_('Record data'),
),
)
def _get_part_values(self, value):
# A6 RR type is obsolete and only a raw interface is provided
return (value,)
class AAAARecord(ForwardRecord):
rrtype = 'AAAA'
rfc = 3596
parts = (
Str('ip_address',
_validate_ip6addr,
label=_('IP Address'),
),
)
class AFSDBRecord(DNSRecord):
rrtype = 'AFSDB'
rfc = 1183
parts = (
Int('subtype?',
label=_('Subtype'),
minvalue=0,
maxvalue=65535,
),
DNSNameParam('hostname',
label=_('Hostname'),
),
)
class APLRecord(UnsupportedDNSRecord):
rrtype = 'APL'
rfc = 3123
class CERTRecord(DNSRecord):
rrtype = 'CERT'
rfc = 4398
parts = (
Int('type',
label=_('Certificate Type'),
minvalue=0,
maxvalue=65535,
),
Int('key_tag',
label=_('Key Tag'),
minvalue=0,
maxvalue=65535,
),
Int('algorithm',
label=_('Algorithm'),
minvalue=0,
maxvalue=255,
),
Str('certificate_or_crl',
label=_('Certificate/CRL'),
),
)
class CNAMERecord(DNSRecord):
rrtype = 'CNAME'
rfc = 1035
parts = (
DNSNameParam('hostname',
label=_('Hostname'),
doc=_('A hostname which this alias hostname points to'),
),
)
class DHCIDRecord(UnsupportedDNSRecord):
rrtype = 'DHCID'
rfc = 4701
class DNAMERecord(DNSRecord):
rrtype = 'DNAME'
rfc = 2672
parts = (
DNSNameParam('target',
label=_('Target'),
),
)
class DSRecord(DNSRecord):
rrtype = 'DS'
rfc = 4034
parts = (
Int('key_tag',
label=_('Key Tag'),
minvalue=0,
maxvalue=65535,
),
Int('algorithm',
label=_('Algorithm'),
minvalue=0,
maxvalue=255,
),
Int('digest_type',
label=_('Digest Type'),
minvalue=0,
maxvalue=255,
),
Str('digest',
label=_('Digest'),
pattern=r'^[0-9a-fA-F]+$',
pattern_errmsg=u'only hexadecimal digits are allowed'
),
)
class DLVRecord(DSRecord):
# must use same attributes as DSRecord
rrtype = 'DLV'
rfc = 4431
class HINFORecord(UnsupportedDNSRecord):
rrtype = 'HINFO'
rfc = 1035
class HIPRecord(UnsupportedDNSRecord):
rrtype = 'HIP'
rfc = 5205
class KEYRecord(UnsupportedDNSRecord):
# managed by BIND itself
rrtype = 'KEY'
rfc = 2535
class IPSECKEYRecord(UnsupportedDNSRecord):
rrtype = 'IPSECKEY'
rfc = 4025
class KXRecord(DNSRecord):
rrtype = 'KX'
rfc = 2230
parts = (
Int('preference',
label=_('Preference'),
doc=_('Preference given to this exchanger. Lower values are more preferred'),
minvalue=0,
maxvalue=65535,
),
DNSNameParam('exchanger',
label=_('Exchanger'),
doc=_('A host willing to act as a key exchanger'),
),
)
class LOCRecord(DNSRecord):
rrtype = 'LOC'
rfc = 1876
parts = (
Int('lat_deg',
label=_('Degrees Latitude'),
minvalue=0,
maxvalue=90,
),
Int('lat_min?',
label=_('Minutes Latitude'),
minvalue=0,
maxvalue=59,
),
Decimal('lat_sec?',
label=_('Seconds Latitude'),
minvalue='0.0',
maxvalue='59.999',
precision=3,
),
StrEnum('lat_dir',
label=_('Direction Latitude'),
values=(u'N', u'S',),
),
Int('lon_deg',
label=_('Degrees Longitude'),
minvalue=0,
maxvalue=180,
),
Int('lon_min?',
label=_('Minutes Longitude'),
minvalue=0,
maxvalue=59,
),
Decimal('lon_sec?',
label=_('Seconds Longitude'),
minvalue='0.0',
maxvalue='59.999',
precision=3,
),
StrEnum('lon_dir',
label=_('Direction Longitude'),
values=(u'E', u'W',),
),
Decimal('altitude',
label=_('Altitude'),
minvalue='-100000.00',
maxvalue='42849672.95',
precision=2,
),
Decimal('size?',
label=_('Size'),
minvalue='0.0',
maxvalue='90000000.00',
precision=2,
),
Decimal('h_precision?',
label=_('Horizontal Precision'),
minvalue='0.0',
maxvalue='90000000.00',
precision=2,
),
Decimal('v_precision?',
label=_('Vertical Precision'),
minvalue='0.0',
maxvalue='90000000.00',
precision=2,
),
)
format_error_msg = _("""format must be specified as
"d1 [m1 [s1]] {"N"|"S"} d2 [m2 [s2]] {"E"|"W"} alt["m"] [siz["m"] [hp["m"] [vp["m"]]]]"
where:
d1: [0 .. 90] (degrees latitude)
d2: [0 .. 180] (degrees longitude)
m1, m2: [0 .. 59] (minutes latitude/longitude)
s1, s2: [0 .. 59.999] (seconds latitude/longitude)
alt: [-100000.00 .. 42849672.95] BY .01 (altitude in meters)
siz, hp, vp: [0 .. 90000000.00] (size/precision in meters)
See RFC 1876 for details""")
def _get_part_values(self, value):
regex = re.compile(
r'(?P<d1>\d{1,2}\s+)'
r'(?:(?P<m1>\d{1,2}\s+)'
r'(?P<s1>\d{1,2}(?:\.\d{1,3})?\s+)?)?'
r'(?P<dir1>[NS])\s+'
r'(?P<d2>\d{1,3}\s+)'
r'(?:(?P<m2>\d{1,2}\s+)'
r'(?P<s2>\d{1,2}(?:\.\d{1,3})?\s+)?)?'
r'(?P<dir2>[WE])\s+'
r'(?P<alt>-?\d{1,8}(?:\.\d{1,2})?)m?'
r'(?:\s+(?P<siz>\d{1,8}(?:\.\d{1,2})?)m?'
r'(?:\s+(?P<hp>\d{1,8}(?:\.\d{1,2})?)m?'
r'(?:\s+(?P<vp>\d{1,8}(?:\.\d{1,2})?)m?\s*)?)?)?$')
m = regex.match(value)
if m is None:
return None
return tuple(x.strip() if x is not None else x for x in m.groups())
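    # Parsing the module docstring example (illustration only):
    #   '49 11 42.4 N 16 36 29.6 E 227.64m' yields the 12-tuple
    #   ('49', '11', '42.4', 'N', '16', '36', '29.6', 'E', '227.64',
    #    None, None, None) -- size and the two precisions stay unset.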
def _validate_parts(self, parts):
super(LOCRecord, self)._validate_parts(parts)
# create part_name -> part_id map first
part_name_map = dict((part.name, part_id) \
for part_id,part in enumerate(self.parts))
requirements = ( ('lat_sec', 'lat_min'),
('lon_sec', 'lon_min'),
('h_precision', 'size'),
('v_precision', 'h_precision', 'size') )
for req in requirements:
target_part = req[0]
if parts[part_name_map[target_part]] is not None:
required_parts = req[1:]
if any(parts[part_name_map[part]] is None for part in required_parts):
target_cli_name = self.cli_name_format % (self.rrtype.lower(), req[0])
required_cli_names = [ self.cli_name_format % (self.rrtype.lower(), part)
for part in req[1:] ]
error = _("'%(required)s' must not be empty when '%(name)s' is set") % \
dict(required=', '.join(required_cli_names),
name=target_cli_name)
raise errors.ValidationError(name=self.name, error=error)
class MDRecord(UnsupportedDNSRecord):
# obsoleted, use MX instead
rrtype = 'MD'
rfc = 1035
class MINFORecord(UnsupportedDNSRecord):
rrtype = 'MINFO'
rfc = 1035
class MXRecord(DNSRecord):
rrtype = 'MX'
rfc = 1035
parts = (
Int('preference',
label=_('Preference'),
doc=_('Preference given to this exchanger. Lower values are more preferred'),
minvalue=0,
maxvalue=65535,
),
DNSNameParam('exchanger',
label=_('Exchanger'),
doc=_('A host willing to act as a mail exchanger'),
),
)
class NSRecord(DNSRecord):
rrtype = 'NS'
rfc = 1035
parts = (
DNSNameParam('hostname',
label=_('Hostname'),
),
)
class NSECRecord(UnsupportedDNSRecord):
# managed by BIND itself
rrtype = 'NSEC'
rfc = 4034
def _validate_naptr_flags(ugettext, flags):
allowed_flags = u'SAUP'
flags = flags.replace('"','').replace('\'','')
for flag in flags:
if flag not in allowed_flags:
return _('flags must be one of "S", "A", "U", or "P"')
class NAPTRRecord(DNSRecord):
rrtype = 'NAPTR'
rfc = 2915
parts = (
Int('order',
label=_('Order'),
minvalue=0,
maxvalue=65535,
),
Int('preference',
label=_('Preference'),
minvalue=0,
maxvalue=65535,
),
Str('flags',
_validate_naptr_flags,
label=_('Flags'),
normalizer=lambda x:x.upper()
),
Str('service',
label=_('Service'),
),
Str('regexp',
label=_('Regular Expression'),
),
Str('replacement',
label=_('Replacement'),
),
)
class NXTRecord(UnsupportedDNSRecord):
rrtype = 'NXT'
rfc = 2535
class PTRRecord(DNSRecord):
rrtype = 'PTR'
rfc = 1035
parts = (
DNSNameParam('hostname',
#RFC 2317 section 5.2 -- can be relative
label=_('Hostname'),
doc=_('The hostname this reverse record points to'),
),
)
class RPRecord(UnsupportedDNSRecord):
rrtype = 'RP'
rfc = 1183
class SRVRecord(DNSRecord):
rrtype = 'SRV'
rfc = 2782
parts = (
Int('priority',
label=_('Priority'),
minvalue=0,
maxvalue=65535,
),
Int('weight',
label=_('Weight'),
minvalue=0,
maxvalue=65535,
),
Int('port',
label=_('Port'),
minvalue=0,
maxvalue=65535,
),
DNSNameParam('target',
label=_('Target'),
doc=_('The domain name of the target host or \'.\' if the service is decidedly not available at this domain'),
),
)
def _sig_time_validator(ugettext, value):
time_format = "%Y%m%d%H%M%S"
try:
time.strptime(value, time_format)
except ValueError:
return _('the value does not follow "YYYYMMDDHHMMSS" time format')
class SIGRecord(UnsupportedDNSRecord):
# managed by BIND itself
rrtype = 'SIG'
rfc = 2535
class SPFRecord(UnsupportedDNSRecord):
rrtype = 'SPF'
rfc = 4408
class RRSIGRecord(UnsupportedDNSRecord):
# managed by BIND itself
rrtype = 'RRSIG'
rfc = 4034
class SSHFPRecord(DNSRecord):
rrtype = 'SSHFP'
rfc = 4255
parts = (
Int('algorithm',
label=_('Algorithm'),
minvalue=0,
maxvalue=255,
),
Int('fp_type',
label=_('Fingerprint Type'),
minvalue=0,
maxvalue=255,
),
Str('fingerprint',
label=_('Fingerprint'),
),
)
def _get_part_values(self, value):
# fingerprint part can contain space in LDAP, return it as one part
values = value.split(None, 2)
if len(values) != len(self.parts):
return None
return tuple(values)
class TLSARecord(DNSRecord):
rrtype = 'TLSA'
rfc = 6698
parts = (
Int('cert_usage',
label=_('Certificate Usage'),
minvalue=0,
maxvalue=255,
),
Int('selector',
label=_('Selector'),
minvalue=0,
maxvalue=255,
),
Int('matching_type',
label=_('Matching Type'),
minvalue=0,
maxvalue=255,
),
Str('cert_association_data',
label=_('Certificate Association Data'),
),
)
class TXTRecord(DNSRecord):
rrtype = 'TXT'
rfc = 1035
parts = (
Str('data',
label=_('Text Data'),
),
)
def _get_part_values(self, value):
# ignore any space in TXT record
return (value,)
_dns_records = (
ARecord(),
AAAARecord(),
A6Record(),
AFSDBRecord(),
APLRecord(),
CERTRecord(),
CNAMERecord(),
DHCIDRecord(),
DLVRecord(),
DNAMERecord(),
DSRecord(),
HIPRecord(),
IPSECKEYRecord(),
KEYRecord(),
KXRecord(),
LOCRecord(),
MXRecord(),
NAPTRRecord(),
NSRecord(),
NSECRecord(),
PTRRecord(),
RRSIGRecord(),
RPRecord(),
SIGRecord(),
SPFRecord(),
SRVRecord(),
SSHFPRecord(),
TLSARecord(),
TXTRecord(),
)
def __dns_record_options_iter():
for opt in (Any('dnsrecords?',
label=_('Records'),
flags=['no_create', 'no_search', 'no_update'],),
Str('dnstype?',
label=_('Record type'),
flags=['no_create', 'no_search', 'no_update'],),
Str('dnsdata?',
label=_('Record data'),
flags=['no_create', 'no_search', 'no_update'],)):
# These 3 options are used in --structured format. They are defined
# rather in takes_params than has_output_params because of their
# order - they should be printed to CLI before any DNS part param
yield opt
for option in _dns_records:
yield option
for part in option.get_parts():
yield part
for extra in option.get_extra():
yield extra
_dns_record_options = tuple(__dns_record_options_iter())
_dns_supported_record_types = tuple(record.rrtype for record in _dns_records \
if record.supported)
def check_ns_rec_resolvable(zone, name):
assert isinstance(zone, DNSName)
assert isinstance(name, DNSName)
if name.is_empty():
name = zone.make_absolute()
elif not name.is_absolute():
# this is a DNS name relative to the zone
name = name.derelativize(zone.make_absolute())
try:
return api.Command['dns_resolve'](unicode(name))
except errors.NotFound:
raise errors.NotFound(
reason=_('Nameserver \'%(host)s\' does not have a corresponding '
'A/AAAA record') % {'host': name}
)
def dns_container_exists(ldap):
try:
ldap.get_entry(DN(api.env.container_dns, api.env.basedn), [])
except errors.NotFound:
return False
return True
def default_zone_update_policy(zone):
if zone.is_reverse():
return get_dns_reverse_zone_update_policy(api.env.realm, zone.ToASCII())
else:
return get_dns_forward_zone_update_policy(api.env.realm)
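# For a forward zone this expands to the grant statements shown in the
# module docstring above, e.g. for realm EXAMPLE.COM:
#   grant EXAMPLE.COM krb5-self * A; grant EXAMPLE.COM krb5-self * AAAA;
#   grant EXAMPLE.COM krb5-self * SSHFP;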
dnszone_output_params = (
Str('managedby',
label=_('Managedby permission'),
),
)
def _convert_to_idna(value):
"""
Function converts a unicode value to idna, without extra validation.
If conversion fails, None is returned
"""
assert isinstance(value, unicode)
try:
idna_val = value
start_dot = u''
end_dot = u''
if idna_val.startswith(u'.'):
idna_val = idna_val[1:]
start_dot = u'.'
if idna_val.endswith(u'.'):
idna_val = idna_val[:-1]
end_dot = u'.'
idna_val = encodings.idna.nameprep(idna_val)
idna_val = re.split(r'(?<!\\)\.', idna_val)
idna_val = u'%s%s%s' % (start_dot,
u'.'.join(encodings.idna.ToASCII(x)
for x in idna_val),
end_dot)
return idna_val
except Exception:
pass
return None
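# Sketch of the conversion (illustrative value):
#   _convert_to_idna(u'm\xfcnchen.example.')  ->  u'xn--mnchen-3ya.example.'
# Leading and trailing dots are preserved; any conversion failure yields None.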
def _create_idn_filter(cmd, ldap, *args, **options):
term = args[-1]
if term:
#include idna values to search
term_idna = _convert_to_idna(term)
if term_idna and term != term_idna:
term = (term, term_idna)
search_kw = {}
attr_extra_filters = []
for attr, value in cmd.args_options_2_entry(**options).iteritems():
if not isinstance(value, list):
value = [value]
for i, v in enumerate(value):
if isinstance(v, DNSName):
value[i] = v.ToASCII()
elif attr in map_names_to_records:
record = map_names_to_records[attr]
parts = record._get_part_values(v)
if parts is None:
value[i] = v
continue
try:
value[i] = record._part_values_to_string(parts, None)
except errors.ValidationError:
value[i] = v
#create MATCH_ANY filter for multivalue
if len(value) > 1:
f = ldap.make_filter({attr: value}, rules=ldap.MATCH_ANY)
attr_extra_filters.append(f)
else:
search_kw[attr] = value
if cmd.obj.search_attributes:
search_attrs = cmd.obj.search_attributes
else:
search_attrs = cmd.obj.default_attributes
if cmd.obj.search_attributes_config:
config = ldap.get_ipa_config()
config_attrs = config.get(cmd.obj.search_attributes_config, [])
if len(config_attrs) == 1 and (isinstance(config_attrs[0],
basestring)):
search_attrs = config_attrs[0].split(',')
search_kw['objectclass'] = cmd.obj.object_class
attr_filter = ldap.make_filter(search_kw, rules=ldap.MATCH_ALL)
if attr_extra_filters:
#combine filter if there is any idna value
attr_extra_filters.append(attr_filter)
attr_filter = ldap.combine_filters(attr_extra_filters,
rules=ldap.MATCH_ALL)
search_kw = {}
for a in search_attrs:
search_kw[a] = term
term_filter = ldap.make_filter(search_kw, exact=False)
member_filter = cmd.get_member_filter(ldap, **options)
filter = ldap.combine_filters(
(term_filter, attr_filter, member_filter), rules=ldap.MATCH_ALL
)
return filter
map_names_to_records = {"%srecord" % record.rrtype.lower(): record for record
in _dns_records if record.supported}
def _records_idn_postprocess(record, **options):
for attr in record.keys():
attr = attr.lower()
try:
param = map_names_to_records[attr]
except KeyError:
continue
if not isinstance(param, DNSRecord):
continue
part_params = param.get_parts()
rrs = []
for dnsvalue in record[attr]:
parts = param._get_part_values(dnsvalue)
if parts is None:
continue
parts = list(parts)
try:
for (i, p) in enumerate(parts):
if isinstance(part_params[i], DNSNameParam):
parts[i] = DNSName(p)
rrs.append(param._part_values_to_string(parts, None,
idna=options.get('raw', False)))
except (errors.ValidationError, errors.ConversionError):
rrs.append(dnsvalue)
record[attr] = rrs
def _normalize_zone(zone):
if isinstance(zone, unicode):
# normalize only non-IDNA zones
try:
return unicode(zone.encode('ascii')).lower()
except UnicodeError:
pass
return zone
def _get_auth_zone_ldap(name):
"""
    Find the authoritative zone in LDAP for a name. Only active zones are
    considered.
    :param name: domain name to find an authoritative zone for
    :return: (zone, truncated)
    zone: authoritative zone, or None if no authoritative zone is in LDAP
    truncated: True if the LDAP search results were truncated
"""
assert isinstance(name, DNSName)
ldap = api.Backend.ldap2
# Create all possible parent zone names
search_name = name.make_absolute()
zone_names = []
for i in xrange(len(search_name)):
zone_name_abs = DNSName(search_name[i:]).ToASCII()
zone_names.append(zone_name_abs)
# compatibility with IPA < 4.0, zone name can be relative
zone_names.append(zone_name_abs[:-1])
# Create filters
objectclass_filter = ldap.make_filter({'objectclass':'idnszone'})
zonenames_filter = ldap.make_filter({'idnsname': zone_names})
zoneactive_filter = ldap.make_filter({'idnsZoneActive': 'true'})
complete_filter = ldap.combine_filters(
[objectclass_filter, zonenames_filter, zoneactive_filter],
rules=ldap.MATCH_ALL
)
try:
entries, truncated = ldap.find_entries(
filter=complete_filter,
attrs_list=['idnsname'],
base_dn=DN(api.env.container_dns, api.env.basedn),
scope=ldap.SCOPE_ONELEVEL
)
except errors.NotFound:
return None, False
# always use absolute zones
matched_auth_zones = [entry.single_value['idnsname'].make_absolute()
for entry in entries]
# return longest match
return max(matched_auth_zones, key=len), truncated
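# For name www.sub.example.com. the candidate zone names searched are
# www.sub.example.com., sub.example.com., example.com., com. and the root
# zone (each also in its relative form); the longest active match wins.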
def _get_longest_match_ns_delegation_ldap(zone, name):
"""
Searches for deepest delegation for name in LDAP zone.
NOTE: NS record in zone apex is not considered as delegation.
It returns None if there is no delegation outside of zone apex.
Example:
zone: example.com.
name: ns.sub.example.com.
records:
extra.ns.sub.example.com.
sub.example.com.
example.com
result: sub.example.com.
:param zone: zone name
:param name:
:return: (match, truncated);
match: delegation name if success, or None if no delegation record exists
"""
assert isinstance(zone, DNSName)
assert isinstance(name, DNSName)
ldap = api.Backend.ldap2
# get zone DN
zone_dn = api.Object.dnszone.get_dn(zone)
if name.is_absolute():
relative_record_name = name.relativize(zone.make_absolute())
else:
relative_record_name = name
# Name is zone apex
if relative_record_name.is_empty():
return None, False
# create list of possible record names
possible_record_names = [DNSName(relative_record_name[i:]).ToASCII()
for i in xrange(len(relative_record_name))]
# search filters
name_filter = ldap.make_filter({'idnsname': [possible_record_names]})
objectclass_filter = ldap.make_filter({'objectclass': 'idnsrecord'})
complete_filter = ldap.combine_filters(
[name_filter, objectclass_filter],
rules=ldap.MATCH_ALL
)
try:
entries, truncated = ldap.find_entries(
filter=complete_filter,
attrs_list=['idnsname', 'nsrecord'],
base_dn=zone_dn,
scope=ldap.SCOPE_ONELEVEL
)
except errors.NotFound:
return None, False
matched_records = []
# test if entry contains NS records
for entry in entries:
if entry.get('nsrecord'):
matched_records.append(entry.single_value['idnsname'])
if not matched_records:
return None, truncated
# return longest match
return max(matched_records, key=len), truncated
def _find_subtree_forward_zones_ldap(name, child_zones_only=False):
"""
Search for forwardzone <name> and all child forwardzones
Filter: (|(*.<name>.)(<name>.))
:param name:
:param child_zones_only: search only for child zones
:return: (list of zonenames, truncated), list is empty if no zone found
"""
assert isinstance(name, DNSName)
ldap = api.Backend.ldap2
# prepare for filter "*.<name>."
search_name = u".%s" % name.make_absolute().ToASCII()
    # we need to search the zone with and without the last dot, due to
    # compatibility with IPA < 4.0
search_names = [search_name, search_name[:-1]]
# Create filters
objectclass_filter = ldap.make_filter({'objectclass':'idnsforwardzone'})
zonenames_filter = ldap.make_filter({'idnsname': search_names}, exact=False,
trailing_wildcard=False)
if not child_zones_only:
# find also zone with exact name
exact_name = name.make_absolute().ToASCII()
        # we need to search the zone with and without the last dot, due to
        # compatibility with IPA < 4.0
        exact_names = [exact_name, exact_name[:-1]]
exact_name_filter = ldap.make_filter({'idnsname': exact_names})
zonenames_filter = ldap.combine_filters([zonenames_filter,
exact_name_filter])
zoneactive_filter = ldap.make_filter({'idnsZoneActive': 'true'})
complete_filter = ldap.combine_filters(
[objectclass_filter, zonenames_filter, zoneactive_filter],
rules=ldap.MATCH_ALL
)
try:
entries, truncated = ldap.find_entries(
filter=complete_filter,
attrs_list=['idnsname'],
base_dn=DN(api.env.container_dns, api.env.basedn),
scope=ldap.SCOPE_ONELEVEL
)
except errors.NotFound:
return [], False
result = [entry.single_value['idnsname'].make_absolute()
for entry in entries]
return result, truncated
def _get_zone_which_makes_fw_zone_ineffective(fwzonename):
"""
Check if forward zone is effective.
If parent zone exists as authoritative zone, the forward zone will not
forward queries by default. It is necessary to delegate authority
to forward zone with a NS record.
Example:
Forward zone: sub.example.com
Zone: example.com
Forwarding will not work, because the server thinks it is authoritative
for zone and will return NXDOMAIN
Adding record: sub.example.com NS ns.sub.example.com.
will delegate authority, and IPA DNS server will forward DNS queries.
:param fwzonename: forwardzone
:return: (zone, truncated)
zone: None if effective, name of authoritative zone otherwise
"""
assert isinstance(fwzonename, DNSName)
auth_zone, truncated_zone = _get_auth_zone_ldap(fwzonename)
if not auth_zone:
return None, truncated_zone
delegation_record_name, truncated_ns =\
_get_longest_match_ns_delegation_ldap(auth_zone, fwzonename)
truncated = truncated_ns or truncated_zone
if delegation_record_name:
return None, truncated
return auth_zone, truncated
def _add_warning_fw_zone_is_not_effective(result, fwzone, version):
"""
Adds warning message to result, if required
"""
authoritative_zone, truncated = \
_get_zone_which_makes_fw_zone_ineffective(fwzone)
if authoritative_zone:
# forward zone is not effective and forwarding will not work
messages.add_message(
version, result,
messages.ForwardzoneIsNotEffectiveWarning(
fwzone=fwzone, authzone=authoritative_zone,
ns_rec=fwzone.relativize(authoritative_zone)
)
)
class DNSZoneBase(LDAPObject):
"""
Base class for DNS Zone
"""
container_dn = api.env.container_dns
object_class = ['top']
possible_objectclasses = ['ipadnszone']
default_attributes = [
'idnsname', 'idnszoneactive', 'idnsforwarders', 'idnsforwardpolicy'
]
takes_params = (
DNSNameParam('idnsname',
_no_wildcard_validator, # RFC 4592 section 4
only_absolute=True,
cli_name='name',
label=_('Zone name'),
doc=_('Zone name (FQDN)'),
default_from=lambda name_from_ip: _reverse_zone_name(name_from_ip),
normalizer=_normalize_zone,
primary_key=True,
),
Str('name_from_ip?', _validate_ipnet,
label=_('Reverse zone IP network'),
doc=_('IP network to create reverse zone name from'),
flags=('virtual_attribute',),
),
Bool('idnszoneactive?',
cli_name='zone_active',
label=_('Active zone'),
doc=_('Is zone active?'),
flags=['no_create', 'no_update'],
attribute=True,
),
Str('idnsforwarders*',
_validate_bind_forwarder,
cli_name='forwarder',
label=_('Zone forwarders'),
doc=_('Per-zone forwarders. A custom port can be specified '
'for each forwarder using a standard format "IP_ADDRESS port PORT"'),
csv=True,
),
StrEnum('idnsforwardpolicy?',
cli_name='forward_policy',
label=_('Forward policy'),
doc=_('Per-zone conditional forwarding policy. Set to "none" to '
'disable forwarding to global forwarder for this zone. In '
'that case, conditional zone forwarders are disregarded.'),
values=(u'only', u'first', u'none'),
),
)
def get_dn(self, *keys, **options):
if not dns_container_exists(self.api.Backend.ldap2):
raise errors.NotFound(reason=_('DNS is not configured'))
zone = keys[-1]
assert isinstance(zone, DNSName)
assert zone.is_absolute()
zone_a = zone.ToASCII()
# special case when zone is the root zone ('.')
if zone == DNSName.root:
return super(DNSZoneBase, self).get_dn(zone_a, **options)
# try first relative name, a new zone has to be added as absolute
# otherwise ObjectViolation is raised
zone_a = zone_a[:-1]
dn = super(DNSZoneBase, self).get_dn(zone_a, **options)
try:
self.backend.get_entry(dn, [''])
except errors.NotFound:
zone_a = u"%s." % zone_a
dn = super(DNSZoneBase, self).get_dn(zone_a, **options)
return dn
def permission_name(self, zone):
assert isinstance(zone, DNSName)
return u"Manage DNS zone %s" % zone.ToASCII()
def get_name_in_zone(self, zone, hostname):
"""
Get name of a record that is to be added to a new zone. I.e. when
we want to add record "ipa.lab.example.com" in a zone "example.com",
this function should return "ipa.lab". Returns None when record cannot
be added to a zone. Returns '@' when the hostname is the zone record.
"""
assert isinstance(zone, DNSName)
assert zone.is_absolute()
assert isinstance(hostname, DNSName)
if not hostname.is_absolute():
return hostname
if hostname.is_subdomain(zone):
return hostname.relativize(zone)
return None
def _remove_permission(self, zone):
permission_name = self.permission_name(zone)
try:
api.Command['permission_del'](permission_name, force=True)
except errors.NotFound, e:
if zone == DNSName.root: # special case root zone
raise
# compatibility, older IPA versions which allows to create zone
# without absolute zone name
permission_name_rel = self.permission_name(
zone.relativize(DNSName.root)
)
try:
api.Command['permission_del'](permission_name_rel, force=True)
except errors.NotFound:
raise e # re-raise original exception
def _make_zonename_absolute(self, entry_attrs, **options):
"""
Zone names can be relative in IPA < 4.0, make sure we always return
absolute zone name from ldap
"""
if options.get('raw'):
return
if "idnsname" in entry_attrs:
entry_attrs.single_value['idnsname'] = (
entry_attrs.single_value['idnsname'].make_absolute())
class DNSZoneBase_add(LDAPCreate):
has_output_params = LDAPCreate.has_output_params + dnszone_output_params
def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
assert isinstance(dn, DN)
try:
entry = ldap.get_entry(dn)
except errors.NotFound:
pass
else:
if _check_entry_objectclass(entry, self.obj.object_class):
self.obj.handle_duplicate_entry(*keys)
else:
raise errors.DuplicateEntry(
message=_(u'Only one zone type is allowed per zone name')
)
entry_attrs['idnszoneactive'] = 'TRUE'
return dn
class DNSZoneBase_del(LDAPDelete):
def pre_callback(self, ldap, dn, *nkeys, **options):
assert isinstance(dn, DN)
if not _check_DN_objectclass(ldap, dn, self.obj.object_class):
self.obj.handle_not_found(*nkeys)
return dn
def post_callback(self, ldap, dn, *keys, **options):
try:
self.obj._remove_permission(keys[-1])
except errors.NotFound:
pass
return True
class DNSZoneBase_mod(LDAPUpdate):
has_output_params = LDAPUpdate.has_output_params + dnszone_output_params
def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
self.obj._make_zonename_absolute(entry_attrs, **options)
return dn
class DNSZoneBase_find(LDAPSearch):
__doc__ = _('Search for DNS zones (SOA records).')
has_output_params = LDAPSearch.has_output_params + dnszone_output_params
def args_options_2_params(self, *args, **options):
# FIXME: Check that name_from_ip is valid. This is necessary because
# custom validation rules, including _validate_ipnet, are not
# used when doing a search. Once we have a parameter type for
# IP network objects, this will no longer be necessary, as the
# parameter type will handle the validation itself (see
# <https://fedorahosted.org/freeipa/ticket/2266>).
if 'name_from_ip' in options:
self.obj.params['name_from_ip'](unicode(options['name_from_ip']))
return super(DNSZoneBase_find, self).args_options_2_params(*args, **options)
def args_options_2_entry(self, *args, **options):
if 'name_from_ip' in options:
if 'idnsname' not in options:
options['idnsname'] = self.obj.params['idnsname'].get_default(**options)
del options['name_from_ip']
search_kw = super(DNSZoneBase_find, self).args_options_2_entry(*args,
**options)
name = search_kw.get('idnsname')
if name:
search_kw['idnsname'] = [name, name.relativize(DNSName.root)]
return search_kw
def pre_callback(self, ldap, filter, attrs_list, base_dn, scope, *args, **options):
assert isinstance(base_dn, DN)
# Check if DNS container exists must be here for find methods
if not dns_container_exists(self.api.Backend.ldap2):
raise errors.NotFound(reason=_('DNS is not configured'))
filter = _create_idn_filter(self, ldap, *args, **options)
return (filter, base_dn, scope)
def post_callback(self, ldap, entries, truncated, *args, **options):
for entry_attrs in entries:
self.obj._make_zonename_absolute(entry_attrs, **options)
return truncated
class DNSZoneBase_show(LDAPRetrieve):
has_output_params = LDAPRetrieve.has_output_params + dnszone_output_params
def pre_callback(self, ldap, dn, attrs_list, *keys, **options):
assert isinstance(dn, DN)
if not _check_DN_objectclass(ldap, dn, self.obj.object_class):
self.obj.handle_not_found(*keys)
return dn
def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
self.obj._make_zonename_absolute(entry_attrs, **options)
return dn
class DNSZoneBase_disable(LDAPQuery):
has_output = output.standard_value
def execute(self, *keys, **options):
ldap = self.obj.backend
dn = self.obj.get_dn(*keys, **options)
entry = ldap.get_entry(dn, ['idnszoneactive', 'objectclass'])
if not _check_entry_objectclass(entry, self.obj.object_class):
self.obj.handle_not_found(*keys)
entry['idnszoneactive'] = ['FALSE']
try:
ldap.update_entry(entry)
except errors.EmptyModlist:
pass
return dict(result=True, value=pkey_to_value(keys[-1], options))
class DNSZoneBase_enable(LDAPQuery):
has_output = output.standard_value
def execute(self, *keys, **options):
ldap = self.obj.backend
dn = self.obj.get_dn(*keys, **options)
entry = ldap.get_entry(dn, ['idnszoneactive', 'objectclass'])
if not _check_entry_objectclass(entry, self.obj.object_class):
self.obj.handle_not_found(*keys)
entry['idnszoneactive'] = ['TRUE']
try:
ldap.update_entry(entry)
except errors.EmptyModlist:
pass
return dict(result=True, value=pkey_to_value(keys[-1], options))
class DNSZoneBase_add_permission(LDAPQuery):
has_output = _output_permissions
msg_summary = _('Added system permission "%(value)s"')
def execute(self, *keys, **options):
ldap = self.obj.backend
dn = self.obj.get_dn(*keys, **options)
try:
entry_attrs = ldap.get_entry(dn, ['objectclass'])
except errors.NotFound:
self.obj.handle_not_found(*keys)
else:
if not _check_entry_objectclass(entry_attrs, self.obj.object_class):
self.obj.handle_not_found(*keys)
permission_name = self.obj.permission_name(keys[-1])
        # compatibility with older IPA versions, which allowed relative zone names
if keys[-1] != DNSName.root: # special case root zone
permission_name_rel = self.obj.permission_name(
keys[-1].relativize(DNSName.root)
)
try:
api.Object['permission'].get_dn_if_exists(permission_name_rel)
except errors.NotFound:
pass
else:
# permission exists without absolute domain name
raise errors.DuplicateEntry(
message=_('permission "%(value)s" already exists') % {
'value': permission_name
}
)
permission = api.Command['permission_add_noaci'](permission_name,
ipapermissiontype=u'SYSTEM'
)['result']
dnszone_ocs = entry_attrs.get('objectclass')
if dnszone_ocs:
for oc in dnszone_ocs:
if oc.lower() == 'ipadnszone':
break
else:
dnszone_ocs.append('ipadnszone')
entry_attrs['managedby'] = [permission['dn']]
ldap.update_entry(entry_attrs)
return dict(
result=True,
value=pkey_to_value(permission_name, options),
)
class DNSZoneBase_remove_permission(LDAPQuery):
has_output = _output_permissions
msg_summary = _('Removed system permission "%(value)s"')
def execute(self, *keys, **options):
ldap = self.obj.backend
dn = self.obj.get_dn(*keys, **options)
try:
entry = ldap.get_entry(dn, ['managedby', 'objectclass'])
except errors.NotFound:
self.obj.handle_not_found(*keys)
else:
if not _check_entry_objectclass(entry, self.obj.object_class):
self.obj.handle_not_found(*keys)
entry['managedby'] = None
try:
ldap.update_entry(entry)
except errors.EmptyModlist:
            # the managedBy attribute is already clean; let's make sure there
            # is also no dangling DNS zone permission
pass
permission_name = self.obj.permission_name(keys[-1])
self.obj._remove_permission(keys[-1])
return dict(
result=True,
value=pkey_to_value(permission_name, options),
)
@register()
class dnszone(DNSZoneBase):
"""
DNS Zone, container for resource records.
"""
object_name = _('DNS zone')
object_name_plural = _('DNS zones')
object_class = DNSZoneBase.object_class + ['idnsrecord', 'idnszone']
default_attributes = DNSZoneBase.default_attributes + [
'idnssoamname', 'idnssoarname', 'idnssoaserial', 'idnssoarefresh',
'idnssoaretry', 'idnssoaexpire', 'idnssoaminimum', 'idnsallowquery',
'idnsallowtransfer', 'idnssecinlinesigning',
] + _record_attributes
label = _('DNS Zones')
label_singular = _('DNS Zone')
takes_params = DNSZoneBase.takes_params + (
DNSNameParam('idnssoamname?',
cli_name='name_server',
label=_('Authoritative nameserver'),
doc=_('Authoritative nameserver domain name'),
default=None, # value will be added in precallback from ldap
),
DNSNameParam('idnssoarname',
_rname_validator,
cli_name='admin_email',
label=_('Administrator e-mail address'),
doc=_('Administrator e-mail address'),
default=DNSName(u'hostmaster'),
normalizer=normalize_zonemgr,
autofill=True,
),
Int('idnssoaserial',
cli_name='serial',
label=_('SOA serial'),
doc=_('SOA record serial number'),
minvalue=1,
maxvalue=4294967295,
default_from=_create_zone_serial,
autofill=True,
),
Int('idnssoarefresh',
cli_name='refresh',
label=_('SOA refresh'),
doc=_('SOA record refresh time'),
minvalue=0,
maxvalue=2147483647,
default=3600,
autofill=True,
),
Int('idnssoaretry',
cli_name='retry',
label=_('SOA retry'),
doc=_('SOA record retry time'),
minvalue=0,
maxvalue=2147483647,
default=900,
autofill=True,
),
Int('idnssoaexpire',
cli_name='expire',
label=_('SOA expire'),
doc=_('SOA record expire time'),
default=1209600,
minvalue=0,
maxvalue=2147483647,
autofill=True,
),
Int('idnssoaminimum',
cli_name='minimum',
label=_('SOA minimum'),
doc=_('How long should negative responses be cached'),
default=3600,
minvalue=0,
maxvalue=2147483647,
autofill=True,
),
Int('dnsttl?',
cli_name='ttl',
label=_('Time to live'),
doc=_('Time to live for records at zone apex'),
minvalue=0,
maxvalue=2147483647, # see RFC 2181
),
StrEnum('dnsclass?',
# Deprecated
cli_name='class',
flags=['no_option'],
values=_record_classes,
),
Str('idnsupdatepolicy?',
cli_name='update_policy',
label=_('BIND update policy'),
doc=_('BIND update policy'),
default_from=lambda idnsname: default_zone_update_policy(idnsname),
autofill=True
),
Bool('idnsallowdynupdate?',
cli_name='dynamic_update',
label=_('Dynamic update'),
doc=_('Allow dynamic updates.'),
attribute=True,
default=False,
autofill=True
),
Str('idnsallowquery?',
_validate_bind_aci,
normalizer=_normalize_bind_aci,
cli_name='allow_query',
label=_('Allow query'),
doc=_('Semicolon separated list of IP addresses or networks which are allowed to issue queries'),
default=u'any;', # anyone can issue queries by default
autofill=True,
),
Str('idnsallowtransfer?',
_validate_bind_aci,
normalizer=_normalize_bind_aci,
cli_name='allow_transfer',
label=_('Allow transfer'),
doc=_('Semicolon separated list of IP addresses or networks which are allowed to transfer the zone'),
            default=u'none;',  # no one can transfer the zone by default
autofill=True,
),
Bool('idnsallowsyncptr?',
cli_name='allow_sync_ptr',
label=_('Allow PTR sync'),
doc=_('Allow synchronization of forward (A, AAAA) and reverse (PTR) records in the zone'),
),
Bool('idnssecinlinesigning?',
cli_name='dnssec',
default=False,
label=_('Allow in-line DNSSEC signing'),
doc=_('Allow inline DNSSEC signing of records in the zone'),
),
Str('nsec3paramrecord?',
_validate_nsec3param_record,
cli_name='nsec3param_rec',
label=_('NSEC3PARAM record'),
doc=_('NSEC3PARAM record for zone in format: hash_algorithm flags iterations salt'),
pattern=r'^\d+ \d+ \d+ (([0-9a-fA-F]{2})+|-)$',
pattern_errmsg=(u'expected format: <0-255> <0-255> <0-65535> '
'even-length_hexadecimal_digits_or_hyphen'),
),
)
    # Permissions will be applied to forward zones too
    # Store permissions in api.env.basedn; the DNS container may not exist yet
managed_permissions = {
'System: Add DNS Entries': {
'non_object': True,
'ipapermright': {'add'},
'ipapermlocation': api.env.basedn,
'ipapermtarget': DN('idnsname=*', 'cn=dns', api.env.basedn),
'replaces': [
'(target = "ldap:///idnsname=*,cn=dns,$SUFFIX")(version 3.0;acl "permission:add dns entries";allow (add) groupdn = "ldap:///cn=add dns entries,cn=permissions,cn=pbac,$SUFFIX";)',
],
'default_privileges': {'DNS Administrators', 'DNS Servers'},
},
'System: Read DNS Entries': {
'non_object': True,
'ipapermright': {'read', 'search', 'compare'},
'ipapermlocation': api.env.basedn,
'ipapermtarget': DN('idnsname=*', 'cn=dns', api.env.basedn),
'ipapermdefaultattr': {
'objectclass',
'a6record', 'aaaarecord', 'afsdbrecord', 'aplrecord', 'arecord',
'certrecord', 'cn', 'cnamerecord', 'dhcidrecord', 'dlvrecord',
'dnamerecord', 'dnsclass', 'dnsttl', 'dsrecord',
'hinforecord', 'hiprecord', 'idnsallowdynupdate',
'idnsallowquery', 'idnsallowsyncptr', 'idnsallowtransfer',
'idnsforwarders', 'idnsforwardpolicy', 'idnsname',
'idnssecinlinesigning', 'idnssoaexpire', 'idnssoaminimum',
'idnssoamname', 'idnssoarefresh', 'idnssoaretry',
'idnssoarname', 'idnssoaserial', 'idnsupdatepolicy',
'idnszoneactive', 'ipseckeyrecord','keyrecord', 'kxrecord',
'locrecord', 'managedby', 'mdrecord', 'minforecord',
'mxrecord', 'naptrrecord', 'nsecrecord', 'nsec3paramrecord',
'nsrecord', 'nxtrecord', 'ptrrecord', 'rprecord', 'rrsigrecord',
'sigrecord', 'spfrecord', 'srvrecord', 'sshfprecord',
'tlsarecord', 'txtrecord', 'unknownrecord',
},
'replaces_system': ['Read DNS Entries'],
'default_privileges': {'DNS Administrators', 'DNS Servers'},
},
'System: Remove DNS Entries': {
'non_object': True,
'ipapermright': {'delete'},
'ipapermlocation': api.env.basedn,
'ipapermtarget': DN('idnsname=*', 'cn=dns', api.env.basedn),
'replaces': [
'(target = "ldap:///idnsname=*,cn=dns,$SUFFIX")(version 3.0;acl "permission:remove dns entries";allow (delete) groupdn = "ldap:///cn=remove dns entries,cn=permissions,cn=pbac,$SUFFIX";)',
],
'default_privileges': {'DNS Administrators', 'DNS Servers'},
},
'System: Update DNS Entries': {
'non_object': True,
'ipapermright': {'write'},
'ipapermlocation': api.env.basedn,
'ipapermtarget': DN('idnsname=*', 'cn=dns', api.env.basedn),
'ipapermdefaultattr': {
'a6record', 'aaaarecord', 'afsdbrecord', 'aplrecord', 'arecord',
'certrecord', 'cn', 'cnamerecord', 'dhcidrecord', 'dlvrecord',
'dnamerecord', 'dnsclass', 'dnsttl', 'dsrecord',
'hinforecord', 'hiprecord', 'idnsallowdynupdate',
'idnsallowquery', 'idnsallowsyncptr', 'idnsallowtransfer',
'idnsforwarders', 'idnsforwardpolicy', 'idnsname',
'idnssecinlinesigning', 'idnssoaexpire', 'idnssoaminimum',
'idnssoamname', 'idnssoarefresh', 'idnssoaretry',
'idnssoarname', 'idnssoaserial', 'idnsupdatepolicy',
'idnszoneactive', 'ipseckeyrecord','keyrecord', 'kxrecord',
'locrecord', 'managedby', 'mdrecord', 'minforecord',
'mxrecord', 'naptrrecord', 'nsecrecord', 'nsec3paramrecord',
'nsrecord', 'nxtrecord', 'ptrrecord', 'rprecord', 'rrsigrecord',
'sigrecord', 'spfrecord', 'srvrecord', 'sshfprecord',
'tlsarecord', 'txtrecord', 'unknownrecord',
},
'replaces': [
'(targetattr = "idnsname || cn || idnsallowdynupdate || dnsttl || dnsclass || arecord || aaaarecord || a6record || nsrecord || cnamerecord || ptrrecord || srvrecord || txtrecord || mxrecord || mdrecord || hinforecord || minforecord || afsdbrecord || sigrecord || keyrecord || locrecord || nxtrecord || naptrrecord || kxrecord || certrecord || dnamerecord || dsrecord || sshfprecord || rrsigrecord || nsecrecord || idnsname || idnszoneactive || idnssoamname || idnssoarname || idnssoaserial || idnssoarefresh || idnssoaretry || idnssoaexpire || idnssoaminimum || idnsupdatepolicy")(target = "ldap:///idnsname=*,cn=dns,$SUFFIX")(version 3.0;acl "permission:update dns entries";allow (write) groupdn = "ldap:///cn=update dns entries,cn=permissions,cn=pbac,$SUFFIX";)',
'(targetattr = "idnsname || cn || idnsallowdynupdate || dnsttl || dnsclass || arecord || aaaarecord || a6record || nsrecord || cnamerecord || ptrrecord || srvrecord || txtrecord || mxrecord || mdrecord || hinforecord || minforecord || afsdbrecord || sigrecord || keyrecord || locrecord || nxtrecord || naptrrecord || kxrecord || certrecord || dnamerecord || dsrecord || sshfprecord || rrsigrecord || nsecrecord || idnsname || idnszoneactive || idnssoamname || idnssoarname || idnssoaserial || idnssoarefresh || idnssoaretry || idnssoaexpire || idnssoaminimum || idnsupdatepolicy || idnsallowquery || idnsallowtransfer || idnsallowsyncptr || idnsforwardpolicy || idnsforwarders")(target = "ldap:///idnsname=*,cn=dns,$SUFFIX")(version 3.0;acl "permission:update dns entries";allow (write) groupdn = "ldap:///cn=update dns entries,cn=permissions,cn=pbac,$SUFFIX";)',
'(targetattr = "idnsname || cn || idnsallowdynupdate || dnsttl || dnsclass || arecord || aaaarecord || a6record || nsrecord || cnamerecord || ptrrecord || srvrecord || txtrecord || mxrecord || mdrecord || hinforecord || minforecord || afsdbrecord || sigrecord || keyrecord || locrecord || nxtrecord || naptrrecord || kxrecord || certrecord || dnamerecord || dsrecord || sshfprecord || rrsigrecord || nsecrecord || idnsname || idnszoneactive || idnssoamname || idnssoarname || idnssoaserial || idnssoarefresh || idnssoaretry || idnssoaexpire || idnssoaminimum || idnsupdatepolicy || idnsallowquery || idnsallowtransfer || idnsallowsyncptr || idnsforwardpolicy || idnsforwarders || managedby")(target = "ldap:///idnsname=*,cn=dns,$SUFFIX")(version 3.0;acl "permission:update dns entries";allow (write) groupdn = "ldap:///cn=update dns entries,cn=permissions,cn=pbac,$SUFFIX";)',
],
'default_privileges': {'DNS Administrators', 'DNS Servers'},
},
'System: Read DNSSEC metadata': {
'non_object': True,
'ipapermright': {'read', 'search', 'compare'},
'ipapermlocation': api.env.basedn,
'ipapermtarget': DN('cn=dns', api.env.basedn),
'ipapermtargetfilter': ['(objectclass=idnsSecKey)'],
'ipapermdefaultattr': {
'idnsSecAlgorithm', 'idnsSecKeyCreated', 'idnsSecKeyPublish',
'idnsSecKeyActivate', 'idnsSecKeyInactive', 'idnsSecKeyDelete',
'idnsSecKeyZone', 'idnsSecKeyRevoke', 'idnsSecKeySep',
'idnsSecKeyRef', 'cn', 'objectclass',
},
'default_privileges': {'DNS Administrators'},
},
'System: Manage DNSSEC metadata': {
'non_object': True,
'ipapermright': {'all'},
'ipapermlocation': api.env.basedn,
'ipapermtarget': DN('cn=dns', api.env.basedn),
'ipapermtargetfilter': ['(objectclass=idnsSecKey)'],
'ipapermdefaultattr': {
'idnsSecAlgorithm', 'idnsSecKeyCreated', 'idnsSecKeyPublish',
'idnsSecKeyActivate', 'idnsSecKeyInactive', 'idnsSecKeyDelete',
'idnsSecKeyZone', 'idnsSecKeyRevoke', 'idnsSecKeySep',
'idnsSecKeyRef', 'cn', 'objectclass',
},
'default_privileges': {'DNS Servers'},
},
'System: Manage DNSSEC keys': {
'non_object': True,
'ipapermright': {'all'},
'ipapermlocation': api.env.basedn,
'ipapermtarget': DN('cn=keys', 'cn=sec', 'cn=dns', api.env.basedn),
'ipapermdefaultattr': {
'ipaPublicKey', 'ipaPrivateKey', 'ipaSecretKey',
'ipaWrappingMech','ipaWrappingKey',
'ipaSecretKeyRef', 'ipk11Private', 'ipk11Modifiable', 'ipk11Label',
'ipk11Copyable', 'ipk11Destroyable', 'ipk11Trusted',
'ipk11CheckValue', 'ipk11StartDate', 'ipk11EndDate',
'ipk11UniqueId', 'ipk11PublicKeyInfo', 'ipk11Distrusted',
'ipk11Subject', 'ipk11Id', 'ipk11Local', 'ipk11KeyType',
'ipk11Derive', 'ipk11KeyGenMechanism', 'ipk11AllowedMechanisms',
'ipk11Encrypt', 'ipk11Verify', 'ipk11VerifyRecover', 'ipk11Wrap',
'ipk11WrapTemplate', 'ipk11Sensitive', 'ipk11Decrypt',
'ipk11Sign', 'ipk11SignRecover', 'ipk11Unwrap',
'ipk11Extractable', 'ipk11AlwaysSensitive',
'ipk11NeverExtractable', 'ipk11WrapWithTrusted',
'ipk11UnwrapTemplate', 'ipk11AlwaysAuthenticate',
'objectclass',
},
'default_privileges': {'DNS Servers'},
},
}
def _rr_zone_postprocess(self, record, **options):
        # Decode IDN ACE form to Unicode; raw records are passed directly from LDAP
if options.get('raw', False):
return
_records_idn_postprocess(record, **options)
def _warning_forwarding(self, result, **options):
        if 'idnsforwarders' in result['result']:
messages.add_message(options.get('version', VERSION_WITHOUT_CAPABILITIES),
result, messages.ForwardersWarning())
def _warning_dnssec_experimental(self, result, *keys, **options):
# add warning when user use option --dnssec
if 'idnssecinlinesigning' in options:
if options['idnssecinlinesigning'] is True:
messages.add_message(options['version'], result,
messages.DNSSECWarning(
additional_info=_("Visit 'http://www.freeipa.org/page/Releases/4.1.0#DNSSEC_Support'.")
))
else:
messages.add_message(options['version'], result,
messages.DNSSECWarning(
additional_info=_("If you encounter any problems please "
"report them and restart 'named' service on affected IPA "
"server.")
))
def _warning_name_server_option(self, result, context, **options):
if getattr(context, 'show_warning_nameserver_option', False):
messages.add_message(
options['version'],
result, messages.OptionSemanticChangedWarning(
label=_(u"setting Authoritative nameserver"),
current_behavior=_(u"It is used only for setting the "
u"SOA MNAME attribute."),
hint=_(u"NS record(s) can be edited in zone apex - '@'. ")
)
)
def _warning_fw_zone_is_not_effective(self, result, *keys, **options):
"""
        Warn if an operation on the zone causes a child forward zone to
        become ineffective
"""
zone = keys[-1]
affected_fw_zones, truncated = _find_subtree_forward_zones_ldap(
zone, child_zones_only=True)
if not affected_fw_zones:
return
for fwzone in affected_fw_zones:
_add_warning_fw_zone_is_not_effective(result, fwzone,
options['version'])
@register()
class dnszone_add(DNSZoneBase_add):
__doc__ = _('Create new DNS zone (SOA record).')
takes_options = DNSZoneBase_add.takes_options + (
Flag('force',
label=_('Force'),
doc=_('Force DNS zone creation even if nameserver is not resolvable.'),
),
# Deprecated
        # the ip-address option is not used anymore; we have to keep it
        # for compatibility with clients older than 4.1
Str('ip_address?',
flags=['no_option', ]
),
)
def _warning_deprecated_option(self, result, **options):
if 'ip_address' in options:
messages.add_message(
options['version'],
result,
messages.OptionDeprecatedWarning(
option='ip-address',
additional_info=u"Value will be ignored.")
)
def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
assert isinstance(dn, DN)
dn = super(dnszone_add, self).pre_callback(
ldap, dn, entry_attrs, attrs_list, *keys, **options)
nameservers = [normalize_zone(x) for x in api.Object.dnsrecord.get_dns_masters()]
server = normalize_zone(api.env.host)
zone = keys[-1]
if entry_attrs.get('idnssoamname'):
if zone.is_reverse() and not entry_attrs['idnssoamname'].is_absolute():
raise errors.ValidationError(
name='name-server',
error=_("Nameserver for reverse zone cannot be a relative DNS name"))
# verify if user specified server is resolvable
if not options['force']:
check_ns_rec_resolvable(keys[0], entry_attrs['idnssoamname'])
# show warning about --name-server option
context.show_warning_nameserver_option = True
else:
# user didn't specify SOA mname
if server in nameservers:
                # the current IPA server is the authoritative nameserver in the SOA record
entry_attrs['idnssoamname'] = [server]
else:
                # the first DNS-capable server is the authoritative nameserver in the SOA record
entry_attrs['idnssoamname'] = [nameservers[0]]
        # all IPA DNS servers should be in the zone's NS records (as absolute domain names)
entry_attrs['nsrecord'] = nameservers
return dn
def execute(self, *keys, **options):
result = super(dnszone_add, self).execute(*keys, **options)
self._warning_deprecated_option(result, **options)
self.obj._warning_forwarding(result, **options)
self.obj._warning_dnssec_experimental(result, *keys, **options)
self.obj._warning_name_server_option(result, context, **options)
self.obj._warning_fw_zone_is_not_effective(result, *keys, **options)
return result
def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
# Add entry to realmdomains
# except for our own domain, forward zones, reverse zones and root zone
zone = keys[0]
if (zone != DNSName(api.env.domain).make_absolute() and
not options.get('idnsforwarders') and
not zone.is_reverse() and
zone != DNSName.root):
try:
api.Command['realmdomains_mod'](add_domain=unicode(zone),
force=True)
except (errors.EmptyModlist, errors.ValidationError):
pass
self.obj._rr_zone_postprocess(entry_attrs, **options)
return dn
@register()
class dnszone_del(DNSZoneBase_del):
__doc__ = _('Delete DNS zone (SOA record).')
msg_summary = _('Deleted DNS zone "%(value)s"')
def execute(self, *keys, **options):
result = super(dnszone_del, self).execute(*keys, **options)
        nkeys = keys[-1]  # multiple zones can be deleted at once
for key in nkeys:
self.obj._warning_fw_zone_is_not_effective(result, key, **options)
return result
def post_callback(self, ldap, dn, *keys, **options):
super(dnszone_del, self).post_callback(ldap, dn, *keys, **options)
# Delete entry from realmdomains
# except for our own domain, reverse zone, and root zone
zone = keys[0].make_absolute()
if (zone != DNSName(api.env.domain).make_absolute() and
not zone.is_reverse() and zone != DNSName.root
):
try:
api.Command['realmdomains_mod'](del_domain=unicode(zone),
force=True)
except (errors.AttrValueNotFound, errors.ValidationError):
pass
return True
@register()
class dnszone_mod(DNSZoneBase_mod):
__doc__ = _('Modify DNS zone (SOA record).')
takes_options = DNSZoneBase_mod.takes_options + (
Flag('force',
label=_('Force'),
doc=_('Force nameserver change even if nameserver not in DNS'),
),
)
def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
if not _check_DN_objectclass(ldap, dn, self.obj.object_class):
self.obj.handle_not_found(*keys)
if 'idnssoamname' in entry_attrs:
nameserver = entry_attrs['idnssoamname']
if nameserver:
if not nameserver.is_empty() and not options['force']:
check_ns_rec_resolvable(keys[0], nameserver)
context.show_warning_nameserver_option = True
else:
                # empty value; this attribute is required by LDAP
raise errors.ValidationError(
name='name_server',
error=_(u"is required"))
return dn
def execute(self, *keys, **options):
result = super(dnszone_mod, self).execute(*keys, **options)
self.obj._warning_forwarding(result, **options)
self.obj._warning_dnssec_experimental(result, *keys, **options)
self.obj._warning_name_server_option(result, context, **options)
return result
def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
dn = super(dnszone_mod, self).post_callback(ldap, dn, entry_attrs,
*keys, **options)
self.obj._rr_zone_postprocess(entry_attrs, **options)
return dn
@register()
class dnszone_find(DNSZoneBase_find):
__doc__ = _('Search for DNS zones (SOA records).')
takes_options = DNSZoneBase_find.takes_options + (
Flag('forward_only',
label=_('Forward zones only'),
cli_name='forward_only',
doc=_('Search for forward zones only'),
),
)
def pre_callback(self, ldap, filter, attrs_list, base_dn, scope, *args, **options):
assert isinstance(base_dn, DN)
filter, base, dn = super(dnszone_find, self).pre_callback(ldap, filter,
attrs_list, base_dn, scope, *args, **options)
if options.get('forward_only', False):
search_kw = {}
search_kw['idnsname'] = [revzone.ToASCII() for revzone in
REVERSE_DNS_ZONES.keys()]
rev_zone_filter = ldap.make_filter(search_kw,
rules=ldap.MATCH_NONE,
exact=False,
trailing_wildcard=False)
filter = ldap.combine_filters((rev_zone_filter, filter),
rules=ldap.MATCH_ALL)
return (filter, base_dn, scope)
def post_callback(self, ldap, entries, truncated, *args, **options):
truncated = super(dnszone_find, self).post_callback(ldap, entries,
truncated, *args,
**options)
for entry_attrs in entries:
self.obj._rr_zone_postprocess(entry_attrs, **options)
return truncated
@register()
class dnszone_show(DNSZoneBase_show):
__doc__ = _('Display information about a DNS zone (SOA record).')
def execute(self, *keys, **options):
result = super(dnszone_show, self).execute(*keys, **options)
self.obj._warning_forwarding(result, **options)
return result
def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
dn = super(dnszone_show, self).post_callback(ldap, dn, entry_attrs,
*keys, **options)
self.obj._rr_zone_postprocess(entry_attrs, **options)
return dn
@register()
class dnszone_disable(DNSZoneBase_disable):
__doc__ = _('Disable DNS Zone.')
msg_summary = _('Disabled DNS zone "%(value)s"')
def execute(self, *keys, **options):
result = super(dnszone_disable, self).execute(*keys, **options)
self.obj._warning_fw_zone_is_not_effective(result, *keys, **options)
return result
@register()
class dnszone_enable(DNSZoneBase_enable):
__doc__ = _('Enable DNS Zone.')
msg_summary = _('Enabled DNS zone "%(value)s"')
def execute(self, *keys, **options):
result = super(dnszone_enable, self).execute(*keys, **options)
self.obj._warning_fw_zone_is_not_effective(result, *keys, **options)
return result
@register()
class dnszone_add_permission(DNSZoneBase_add_permission):
__doc__ = _('Add a permission for per-zone access delegation.')
@register()
class dnszone_remove_permission(DNSZoneBase_remove_permission):
__doc__ = _('Remove a permission for per-zone access delegation.')
@register()
class dnsrecord(LDAPObject):
"""
DNS record.
"""
parent_object = 'dnszone'
container_dn = api.env.container_dns
object_name = _('DNS resource record')
object_name_plural = _('DNS resource records')
object_class = ['top', 'idnsrecord']
permission_filter_objectclasses = ['idnsrecord']
default_attributes = ['idnsname'] + _record_attributes
rdn_is_primary_key = True
label = _('DNS Resource Records')
label_singular = _('DNS Resource Record')
takes_params = (
DNSNameParam('idnsname',
cli_name='name',
label=_('Record name'),
doc=_('Record name'),
primary_key=True,
),
Int('dnsttl?',
cli_name='ttl',
label=_('Time to live'),
doc=_('Time to live'),
),
StrEnum('dnsclass?',
# Deprecated
cli_name='class',
flags=['no_option'],
values=_record_classes,
),
) + _dns_record_options
structured_flag = Flag('structured',
label=_('Structured'),
doc=_('Parse all raw DNS records and return them in a structured way'),
)
def _dsrecord_pre_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
dsrecords = entry_attrs.get('dsrecord')
if dsrecords and self.is_pkey_zone_record(*keys):
raise errors.ValidationError(
name='dsrecord',
error=unicode(_('DS record must not be in zone apex (RFC 4035 section 2.4)')))
def _nsrecord_pre_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
nsrecords = entry_attrs.get('nsrecord')
if options.get('force', False) or nsrecords is None:
return
for nsrecord in nsrecords:
check_ns_rec_resolvable(keys[0], DNSName(nsrecord))
def _idnsname_pre_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
if keys[-1].is_absolute():
if keys[-1].is_subdomain(keys[-2]):
entry_attrs['idnsname'] = [keys[-1].relativize(keys[-2])]
elif not self.is_pkey_zone_record(*keys):
raise errors.ValidationError(name='idnsname',
error=unicode(_('out-of-zone data: record name must '
'be a subdomain of the zone or a '
'relative name')))
        # wildcards are disallowed for these types (RFC 4592 section 4)
no_wildcard_rtypes = ['DNAME', 'DS', 'NS']
if (keys[-1].is_wild() and
any(entry_attrs.get('%srecord' % r.lower())
for r in no_wildcard_rtypes)
):
raise errors.ValidationError(
name='idnsname',
error=(_('owner of %(types)s records '
'should not be a wildcard domain name (RFC 4592 section 4)') %
{'types': ', '.join(no_wildcard_rtypes)}
)
)
def _ptrrecord_pre_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
ptrrecords = entry_attrs.get('ptrrecord')
if ptrrecords is None:
return
zone = keys[-2]
if self.is_pkey_zone_record(*keys):
addr = _dns_zone_record
else:
addr = keys[-1]
zone_len = 0
for valid_zone in REVERSE_DNS_ZONES:
if zone.is_subdomain(valid_zone):
zone = zone.relativize(valid_zone)
zone_name = valid_zone
zone_len = REVERSE_DNS_ZONES[valid_zone]
if not zone_len:
allowed_zones = ', '.join([unicode(revzone) for revzone in
REVERSE_DNS_ZONES.keys()])
raise errors.ValidationError(name='ptrrecord',
                error=unicode(_('Reverse zone for PTR record should be a sub-zone of one of the following fully qualified domains: %s') % allowed_zones))
addr_len = len(addr.labels)
# Classless zones (0/25.0.0.10.in-addr.arpa.) -> skip check
# zone has to be checked without reverse domain suffix (in-addr.arpa.)
for sign in ('/', '-'):
for name in (zone, addr):
for label in name.labels:
if sign in label:
return
ip_addr_comp_count = addr_len + len(zone.labels)
if ip_addr_comp_count != zone_len:
raise errors.ValidationError(name='ptrrecord',
error=unicode(_('Reverse zone %(name)s requires exactly '
'%(count)d IP address components, '
'%(user_count)d given')
% dict(name=zone_name,
count=zone_len,
user_count=ip_addr_comp_count)))
def run_precallback_validators(self, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
ldap = self.api.Backend.ldap2
for rtype in entry_attrs.keys():
rtype_cb = getattr(self, '_%s_pre_callback' % rtype, None)
if rtype_cb:
rtype_cb(ldap, dn, entry_attrs, *keys, **options)
def is_pkey_zone_record(self, *keys):
assert isinstance(keys[-1], DNSName)
assert isinstance(keys[-2], DNSName)
idnsname = keys[-1]
zonename = keys[-2]
if idnsname.is_empty() or idnsname == zonename:
return True
return False
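    # A minimal sketch of the apex check above (values are illustrative,
    # assuming dnspython-style DNSName equality):
    #   is_pkey_zone_record(DNSName(u'example.com.'), DNSName(u'example.com.'))
    #   -> True   (record name equals the zone name, i.e. the zone apex)
    #   is_pkey_zone_record(DNSName(u'example.com.'), DNSName(u'www'))
    #   -> False  (ordinary record inside the zone)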
def check_zone(self, zone, **options):
"""
        Check whether the zone exists and whether it is a master zone
"""
parent_object = self.api.Object[self.parent_object]
dn = parent_object.get_dn(zone, **options)
ldap = self.api.Backend.ldap2
try:
entry = ldap.get_entry(dn, ['objectclass'])
except errors.NotFound:
parent_object.handle_not_found(zone)
else:
# only master zones can contain records
if 'idnszone' not in [x.lower() for x in entry.get('objectclass', [])]:
raise errors.ValidationError(
name='dnszoneidnsname',
error=_(u'only master zones can contain records')
)
return dn
def get_dn(self, *keys, **options):
if not dns_container_exists(self.api.Backend.ldap2):
raise errors.NotFound(reason=_('DNS is not configured'))
dn = self.check_zone(keys[-2], **options)
if self.is_pkey_zone_record(*keys):
return dn
        # Make the RR name relative if possible
relative_name = keys[-1].relativize(keys[-2]).ToASCII()
keys = keys[:-1] + (relative_name,)
return super(dnsrecord, self).get_dn(*keys, **options)
def attr_to_cli(self, attr):
try:
cliname = attr[:-len('record')].upper()
except IndexError:
cliname = attr
return cliname
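    # For example, attr_to_cli('arecord') returns 'A' and
    # attr_to_cli('nsec3paramrecord') returns 'NSEC3PARAM'.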
def get_dns_masters(self):
ldap = self.api.Backend.ldap2
base_dn = DN(('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'), self.api.env.basedn)
ldap_filter = '(&(objectClass=ipaConfigObject)(cn=DNS))'
dns_masters = []
try:
entries = ldap.find_entries(filter=ldap_filter, base_dn=base_dn)[0]
for entry in entries:
try:
master = entry.dn[1]['cn']
dns_masters.append(master)
except (IndexError, KeyError):
pass
except errors.NotFound:
return []
return dns_masters
def has_cli_options(self, options, no_option_msg, allow_empty_attrs=False):
if any(k in options for k in ('setattr', 'addattr', 'delattr', 'rename')):
return
has_options = False
for attr in options.keys():
if attr in self.params and not self.params[attr].primary_key:
if options[attr] or allow_empty_attrs:
has_options = True
break
if not has_options:
raise errors.OptionError(no_option_msg)
def get_record_entry_attrs(self, entry_attrs):
entry_attrs = entry_attrs.copy()
for attr in entry_attrs.keys():
if attr not in self.params or self.params[attr].primary_key:
del entry_attrs[attr]
return entry_attrs
def postprocess_record(self, record, **options):
if options.get('structured', False):
for attr in record.keys():
# attributes in LDAPEntry may not be normalized
attr = attr.lower()
try:
param = self.params[attr]
except KeyError:
continue
if not isinstance(param, DNSRecord):
continue
parts_params = param.get_parts()
for dnsvalue in record[attr]:
dnsentry = {
u'dnstype' : unicode(param.rrtype),
u'dnsdata' : dnsvalue
}
values = param._get_part_values(dnsvalue)
if values is None:
continue
for val_id, val in enumerate(values):
if val is not None:
#decode IDN
if isinstance(parts_params[val_id], DNSNameParam):
dnsentry[parts_params[val_id].name] = \
_dns_name_to_string(val,
options.get('raw', False))
else:
dnsentry[parts_params[val_id].name] = val
record.setdefault('dnsrecords', []).append(dnsentry)
del record[attr]
elif not options.get('raw', False):
            # Decode IDN ACE form to Unicode; raw records are passed directly from LDAP
_records_idn_postprocess(record, **options)
def get_rrparam_from_part(self, part_name):
"""
        Get the DNSRecord parameter instance that has part_name as one of its
        parts. If no such parameter is found, None is returned.

        :param part_name: Part parameter name
"""
try:
param = self.params[part_name]
if not any(flag in param.flags for flag in \
('dnsrecord_part', 'dnsrecord_extra')):
return None
# All DNS record part or extra parameters contain a name of its
# parent RR parameter in its hint attribute
rrparam = self.params[param.hint]
except (KeyError, AttributeError):
return None
return rrparam
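    # Illustrative lookup (the part name below is an assumption about the
    # parameter naming scheme): a part such as 'a_part_ip_address' carries
    # the name of its parent RR parameter in its 'hint' attribute, so this
    # method would return the 'arecord' DNSRecord parameter for it.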
def iterate_rrparams_by_parts(self, kw, skip_extra=False):
"""
        Iterates through all DNSRecord instances that have at least one of
        their parts or extra options in the given dictionary. It yields each
        DNSRecord instance only once, for the first occurrence of one of its
        parts or extra options.

        :param kw: Dictionary with DNS record parts or extra options
        :param skip_extra: Skip DNS record extra options; yield only DNS
            records with a real record part
"""
processed = []
for opt in kw:
rrparam = self.get_rrparam_from_part(opt)
if rrparam is None:
continue
if skip_extra and 'dnsrecord_extra' in self.params[opt].flags:
continue
if rrparam.name not in processed:
processed.append(rrparam.name)
yield rrparam
def updated_rrattrs(self, old_entry, entry_attrs):
"""Returns updated RR attributes
"""
rrattrs = {}
if old_entry is not None:
old_rrattrs = dict((key, value) for key, value in old_entry.iteritems()
if key in self.params and
isinstance(self.params[key], DNSRecord))
rrattrs.update(old_rrattrs)
new_rrattrs = dict((key, value) for key, value in entry_attrs.iteritems()
if key in self.params and
isinstance(self.params[key], DNSRecord))
rrattrs.update(new_rrattrs)
return rrattrs
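    # Merge semantics sketch (hypothetical values): given
    #   old_entry   = {'arecord': ['192.0.2.1']}
    #   entry_attrs = {'arecord': ['192.0.2.2']}
    # the values from entry_attrs win per attribute, so the result is
    #   {'arecord': ['192.0.2.2']}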
def check_record_type_collisions(self, keys, rrattrs):
# Test that only allowed combination of record types was created
# CNAME record validation
cnames = rrattrs.get('cnamerecord')
if cnames is not None:
if len(cnames) > 1:
raise errors.ValidationError(name='cnamerecord',
error=_('only one CNAME record is allowed per name '
'(RFC 2136, section 1.1.5)'))
if any(rrvalue is not None
and rrattr != 'cnamerecord'
for rrattr, rrvalue in rrattrs.iteritems()):
raise errors.ValidationError(name='cnamerecord',
error=_('CNAME record is not allowed to coexist '
'with any other record (RFC 1034, section 3.6.2)'))
# DNAME record validation
dnames = rrattrs.get('dnamerecord')
if dnames is not None:
if len(dnames) > 1:
raise errors.ValidationError(name='dnamerecord',
error=_('only one DNAME record is allowed per name '
'(RFC 6672, section 2.4)'))
# DNAME must not coexist with CNAME, but this is already checked earlier
# NS record validation
# NS record can coexist only with A, AAAA, DS, and other NS records (except zone apex)
# RFC 2181 section 6.1,
allowed_records = ['AAAA', 'A', 'DS', 'NS']
nsrecords = rrattrs.get('nsrecord')
if nsrecords and not self.is_pkey_zone_record(*keys):
for r_type in _record_types:
if (r_type not in allowed_records
and rrattrs.get('%srecord' % r_type.lower())
):
raise errors.ValidationError(
name='nsrecord',
error=_('NS record is not allowed to coexist with an '
'%(type)s record except when located in a '
'zone root record (RFC 2181, section 6.1)') %
{'type': r_type})
def check_record_type_dependencies(self, keys, rrattrs):
# Test that all record type dependencies are satisfied
# DS record validation
        # a DS record must coexist with an NS record
dsrecords = rrattrs.get('dsrecord')
nsrecords = rrattrs.get('nsrecord')
# DS record cannot be in zone apex, checked in pre-callback validators
if dsrecords and not nsrecords:
raise errors.ValidationError(
name='dsrecord',
                error=_('DS record requires an NS record to coexist '
                        '(RFC 4592 section 4.6, RFC 4035 section 2.4)'))
def _entry2rrsets(self, entry_attrs, dns_name, dns_domain):
'''Convert entry_attrs to a dictionary {rdtype: rrset}.
:returns:
None if entry_attrs is None
{rdtype: None} if RRset of given type is empty
{rdtype: RRset} if RRset of given type is non-empty
'''
record_attr_suf = 'record'
ldap_rrsets = {}
if not entry_attrs:
# all records were deleted => name should not exist in DNS
return None
for attr, value in entry_attrs.iteritems():
if not attr.endswith(record_attr_suf):
continue
rdtype = dns.rdatatype.from_text(attr[0:-len(record_attr_suf)])
if not value:
ldap_rrsets[rdtype] = None # RRset is empty
continue
try:
                # the TTL here can be an arbitrary value because it is
                # ignored during comparison
ldap_rrset = dns.rrset.from_text(
dns_name, 86400, dns.rdataclass.IN, rdtype,
*map(str, value))
# make sure that all names are absolute so RRset
# comparison will work
for ldap_rr in ldap_rrset:
ldap_rr.choose_relativity(origin=dns_domain,
relativize=False)
ldap_rrsets[rdtype] = ldap_rrset
except dns.exception.SyntaxError as e:
self.log.error('DNS syntax error: %s %s %s: %s', dns_name,
dns.rdatatype.to_text(rdtype), value, e)
raise
return ldap_rrsets
def wait_for_modified_attr(self, ldap_rrset, rdtype, dns_name):
'''Wait until DNS resolver returns up-to-date answer for given RRset
or until the maximum number of attempts is reached.
Number of attempts is controlled by self.api.env['wait_for_dns'].
:param ldap_rrset:
None if given rdtype should not exist or
dns.rrset.RRset to match against data in DNS.
:param dns_name: FQDN to query
:type dns_name: dns.name.Name
:return: None if data in DNS and LDAP match
:raises errors.DNSDataMismatch: if data in DNS and LDAP doesn't match
:raises dns.exception.DNSException: if DNS resolution failed
'''
resolver = dns.resolver.Resolver()
resolver.set_flags(0) # disable recursion (for NS RR checks)
max_attempts = int(self.api.env['wait_for_dns'])
warn_attempts = max_attempts / 2
period = 1 # second
attempt = 0
log_fn = self.log.debug
log_fn('querying DNS server: expecting answer {%s}', ldap_rrset)
wait_template = 'waiting for DNS answer {%s}: got {%s} (attempt %s); '\
'waiting %s seconds before next try'
while attempt < max_attempts:
if attempt >= warn_attempts:
log_fn = self.log.warn
attempt += 1
try:
dns_answer = resolver.query(dns_name, rdtype,
dns.rdataclass.IN,
raise_on_no_answer=False)
dns_rrset = None
if rdtype == _NS:
# NS records can be in Authority section (sometimes)
dns_rrset = dns_answer.response.get_rrset(
dns_answer.response.authority, dns_name, _IN, rdtype)
if not dns_rrset:
# Look for NS and other data in Answer section
dns_rrset = dns_answer.rrset
if dns_rrset == ldap_rrset:
log_fn('DNS answer matches expectations (attempt %s)',
attempt)
return
log_msg = wait_template % (ldap_rrset, dns_answer.response,
attempt, period)
except (dns.resolver.NXDOMAIN,
dns.resolver.YXDOMAIN,
dns.resolver.NoNameservers,
dns.resolver.Timeout) as e:
if attempt >= max_attempts:
raise
else:
log_msg = wait_template % (ldap_rrset, type(e), attempt,
period)
log_fn(log_msg)
time.sleep(period)
# Maximum number of attempts was reached
else:
raise errors.DNSDataMismatch(expected=ldap_rrset, got=dns_rrset)
def wait_for_modified_attrs(self, entry_attrs, dns_name, dns_domain):
'''Wait until DNS resolver returns up-to-date answer for given entry
or until the maximum number of attempts is reached.
:param entry_attrs:
None if the entry was deleted from LDAP or
LDAPEntry instance containing at least all modified attributes.
:param dns_name: FQDN
:type dns_name: dns.name.Name
:raises errors.DNSDataMismatch: if data in DNS and LDAP doesn't match
'''
# represent data in LDAP as dictionary rdtype => rrset
ldap_rrsets = self._entry2rrsets(entry_attrs, dns_name, dns_domain)
nxdomain = ldap_rrsets is None
if nxdomain:
# name should not exist => ask for A record and check result
ldap_rrsets = {dns.rdatatype.from_text('A'): None}
for rdtype, ldap_rrset in ldap_rrsets.iteritems():
try:
self.wait_for_modified_attr(ldap_rrset, rdtype, dns_name)
except dns.resolver.NXDOMAIN as e:
if nxdomain:
continue
else:
e = errors.DNSDataMismatch(expected=ldap_rrset,
got="NXDOMAIN")
self.log.error(e)
raise e
except dns.resolver.NoNameservers as e:
# Do not raise exception if we have got SERVFAILs.
# Maybe the user has created an invalid zone intentionally.
self.log.warn('waiting for DNS answer {%s}: got {%s}; '
'ignoring', ldap_rrset, type(e))
continue
except dns.exception.DNSException as e:
err_desc = str(type(e))
err_str = str(e)
if err_str:
err_desc += ": %s" % err_str
e = errors.DNSDataMismatch(expected=ldap_rrset, got=err_desc)
self.log.error(e)
raise e
def wait_for_modified_entries(self, entries):
'''Call wait_for_modified_attrs for all entries in given dict.
:param entries:
Dict {(dns_domain, dns_name): entry_for_wait_for_modified_attrs}
'''
for entry_name, entry in entries.iteritems():
dns_domain = entry_name[0]
dns_name = entry_name[1].derelativize(dns_domain)
self.wait_for_modified_attrs(entry, dns_name, dns_domain)
def warning_if_ns_change_cause_fwzone_ineffective(self, result, *keys,
**options):
"""Detect if NS record change can make forward zones ineffective due
missing delegation. Run after parent's execute method.
"""
record_name_absolute = keys[-1]
zone = keys[-2]
if not record_name_absolute.is_absolute():
record_name_absolute = record_name_absolute.derelativize(zone)
affected_fw_zones, truncated = _find_subtree_forward_zones_ldap(
record_name_absolute)
if not affected_fw_zones:
return
for fwzone in affected_fw_zones:
_add_warning_fw_zone_is_not_effective(result, fwzone,
options['version'])
@register()
class dnsrecord_add(LDAPCreate):
__doc__ = _('Add new DNS resource record.')
no_option_msg = 'No options to add a specific record provided.\n' \
"Command help may be consulted for all supported record types."
takes_options = LDAPCreate.takes_options + (
Flag('force',
label=_('Force'),
flags=['no_option', 'no_output'],
doc=_('force NS record creation even if its hostname is not in DNS'),
),
dnsrecord.structured_flag,
)
def args_options_2_entry(self, *keys, **options):
self.obj.has_cli_options(options, self.no_option_msg)
return super(dnsrecord_add, self).args_options_2_entry(*keys, **options)
def interactive_prompt_callback(self, kw):
try:
self.obj.has_cli_options(kw, self.no_option_msg)
            # Some DNS records were entered, do not use full interactive help.
            # We should still ask the user for the required parts of the DNS
            # record they are trying to add, in the same way we do for
            # standard LDAP parameters.
            #
            # Do not ask for required parts when any "extra" option is used;
            # it can fill all required params by itself.
new_kw = {}
for rrparam in self.obj.iterate_rrparams_by_parts(kw, skip_extra=True):
user_options = rrparam.prompt_missing_parts(self, kw,
prompt_optional=False)
new_kw.update(user_options)
kw.update(new_kw)
return
except errors.OptionError:
pass
try:
idnsname = DNSName(kw['idnsname'])
except Exception, e:
raise errors.ValidationError(name='idnsname', error=unicode(e))
try:
zonename = DNSName(kw['dnszoneidnsname'])
except Exception, e:
raise errors.ValidationError(name='dnszoneidnsname', error=unicode(e))
# check zone type
if idnsname.is_empty():
common_types = u', '.join(_zone_top_record_types)
elif zonename.is_reverse():
common_types = u', '.join(_rev_top_record_types)
else:
common_types = u', '.join(_top_record_types)
self.Backend.textui.print_plain(_(u'Please choose a type of DNS resource record to be added'))
self.Backend.textui.print_plain(_(u'The most common types for this type of zone are: %s\n') %\
common_types)
ok = False
while not ok:
rrtype = self.Backend.textui.prompt(_(u'DNS resource record type'))
if rrtype is None:
return
try:
name = '%srecord' % rrtype.lower()
param = self.params[name]
if not isinstance(param, DNSRecord):
raise ValueError()
if not param.supported:
raise ValueError()
except (KeyError, ValueError):
all_types = u', '.join(_dns_supported_record_types)
self.Backend.textui.print_plain(_(u'Invalid or unsupported type. Allowed values are: %s') % all_types)
continue
ok = True
user_options = param.prompt_parts(self)
kw.update(user_options)
def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
assert isinstance(dn, DN)
precallback_attrs = []
processed_attrs = []
for option in options:
try:
param = self.params[option]
except KeyError:
continue
rrparam = self.obj.get_rrparam_from_part(option)
if rrparam is None:
continue
if 'dnsrecord_part' in param.flags:
if rrparam.name in processed_attrs:
# this record was already entered
continue
if rrparam.name in entry_attrs:
# this record is entered both via parts and raw records
raise errors.ValidationError(name=param.cli_name or param.name,
error=_('Raw value of a DNS record was already set by "%(name)s" option') \
% dict(name=rrparam.cli_name or rrparam.name))
parts = rrparam.get_parts_from_kw(options)
dnsvalue = [rrparam._convert_scalar(parts)]
entry_attrs[rrparam.name] = dnsvalue
processed_attrs.append(rrparam.name)
continue
if 'dnsrecord_extra' in param.flags:
# do not run precallback for unset flags
if isinstance(param, Flag) and not options[option]:
continue
# extra option is passed, run per-type pre_callback for given RR type
precallback_attrs.append(rrparam.name)
# Run pre_callback validators
self.obj.run_precallback_validators(dn, entry_attrs, *keys, **options)
# run precallback also for all new RR type attributes in entry_attrs
for attr in entry_attrs.keys():
try:
param = self.params[attr]
except KeyError:
continue
if not isinstance(param, DNSRecord):
continue
precallback_attrs.append(attr)
precallback_attrs = list(set(precallback_attrs))
for attr in precallback_attrs:
            # run the per-type pre_callback for each collected RR attribute
try:
param = self.params[attr]
except KeyError:
continue
param.dnsrecord_add_pre_callback(ldap, dn, entry_attrs, attrs_list, *keys, **options)
# Store all new attrs so that DNSRecord post callback is called for
# new attributes only and not for all attributes in the LDAP entry
setattr(context, 'dnsrecord_precallback_attrs', precallback_attrs)
# We always want to retrieve all DNS record attributes to test for
# record type collisions (#2601)
try:
old_entry = ldap.get_entry(dn, _record_attributes)
except errors.NotFound:
old_entry = None
else:
for attr in entry_attrs.keys():
if attr not in _record_attributes:
continue
if entry_attrs[attr] is None:
entry_attrs[attr] = []
if not isinstance(entry_attrs[attr], (tuple, list)):
vals = [entry_attrs[attr]]
else:
vals = list(entry_attrs[attr])
entry_attrs[attr] = list(set(old_entry.get(attr, []) + vals))
rrattrs = self.obj.updated_rrattrs(old_entry, entry_attrs)
self.obj.check_record_type_dependencies(keys, rrattrs)
self.obj.check_record_type_collisions(keys, rrattrs)
context.dnsrecord_entry_mods = getattr(context, 'dnsrecord_entry_mods',
{})
context.dnsrecord_entry_mods[(keys[0], keys[1])] = entry_attrs.copy()
return dn
def exc_callback(self, keys, options, exc, call_func, *call_args, **call_kwargs):
if call_func.func_name == 'add_entry':
if isinstance(exc, errors.DuplicateEntry):
                # A new record is being added to an existing LDAP DNS object.
                # The update can safely be run as the old record values have
                # already been merged in pre_callback.
ldap = self.obj.backend
entry_attrs = self.obj.get_record_entry_attrs(call_args[0])
update = ldap.get_entry(entry_attrs.dn, entry_attrs.keys())
update.update(entry_attrs)
ldap.update_entry(update, **call_kwargs)
return
raise exc
def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
for attr in getattr(context, 'dnsrecord_precallback_attrs', []):
param = self.params[attr]
param.dnsrecord_add_post_callback(ldap, dn, entry_attrs, *keys, **options)
if self.obj.is_pkey_zone_record(*keys):
entry_attrs[self.obj.primary_key.name] = [_dns_zone_record]
self.obj.postprocess_record(entry_attrs, **options)
if self.api.env['wait_for_dns']:
self.obj.wait_for_modified_entries(context.dnsrecord_entry_mods)
return dn
@register()
class dnsrecord_mod(LDAPUpdate):
__doc__ = _('Modify a DNS resource record.')
no_option_msg = 'No options to modify a specific record provided.'
takes_options = LDAPUpdate.takes_options + (
dnsrecord.structured_flag,
)
def args_options_2_entry(self, *keys, **options):
self.obj.has_cli_options(options, self.no_option_msg, True)
return super(dnsrecord_mod, self).args_options_2_entry(*keys, **options)
def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
assert isinstance(dn, DN)
if options.get('rename') and self.obj.is_pkey_zone_record(*keys):
# zone rename is not allowed
raise errors.ValidationError(name='rename',
error=_('DNS zone root record cannot be renamed'))
# check if any attr should be updated using structured instead of replaced
# format is recordname : (old_value, new_parts)
updated_attrs = {}
for param in self.obj.iterate_rrparams_by_parts(options, skip_extra=True):
parts = param.get_parts_from_kw(options, raise_on_none=False)
if parts is None:
# old-style modification
continue
old_value = entry_attrs.get(param.name)
if not old_value:
raise errors.RequirementError(name=param.name)
if isinstance(old_value, (tuple, list)):
if len(old_value) > 1:
raise errors.ValidationError(name=param.name,
error=_('DNS records can be only updated one at a time'))
old_value = old_value[0]
updated_attrs[param.name] = (old_value, parts)
# Run pre_callback validators
self.obj.run_precallback_validators(dn, entry_attrs, *keys, **options)
# current entry is needed in case of per-dns-record-part updates and
# for record type collision check
try:
old_entry = ldap.get_entry(dn, _record_attributes)
except errors.NotFound:
self.obj.handle_not_found(*keys)
if updated_attrs:
for attr in updated_attrs:
param = self.params[attr]
old_dnsvalue, new_parts = updated_attrs[attr]
if old_dnsvalue not in old_entry.get(attr, []):
attr_name = unicode(param.label or param.name)
raise errors.AttrValueNotFound(attr=attr_name,
value=old_dnsvalue)
old_entry[attr].remove(old_dnsvalue)
old_parts = param._get_part_values(old_dnsvalue)
modified_parts = tuple(part if part is not None else old_parts[part_id] \
for part_id,part in enumerate(new_parts))
new_dnsvalue = [param._convert_scalar(modified_parts)]
entry_attrs[attr] = list(set(old_entry[attr] + new_dnsvalue))
rrattrs = self.obj.updated_rrattrs(old_entry, entry_attrs)
self.obj.check_record_type_dependencies(keys, rrattrs)
self.obj.check_record_type_collisions(keys, rrattrs)
context.dnsrecord_entry_mods = getattr(context, 'dnsrecord_entry_mods',
{})
context.dnsrecord_entry_mods[(keys[0], keys[1])] = entry_attrs.copy()
return dn
def execute(self, *keys, **options):
result = super(dnsrecord_mod, self).execute(*keys, **options)
        # remove the entry if it became empty
if not self.obj.is_pkey_zone_record(*keys):
rename = options.get('rename')
if rename is not None:
keys = keys[:-1] + (rename,)
dn = self.obj.get_dn(*keys, **options)
ldap = self.obj.backend
old_entry = ldap.get_entry(dn, _record_attributes)
del_all = True
for attr in old_entry.keys():
if old_entry[attr]:
del_all = False
break
if del_all:
result = self.obj.methods.delentry(*keys,
version=options['version'])
# we need to modify delete result to match mod output type
# only one value is expected, not a list
if client_has_capability(options['version'], 'primary_key_types'):
assert len(result['value']) == 1
result['value'] = result['value'][0]
# indicate that entry was deleted
context.dnsrecord_entry_mods[(keys[0], keys[1])] = None
if self.api.env['wait_for_dns']:
self.obj.wait_for_modified_entries(context.dnsrecord_entry_mods)
if 'nsrecord' in options:
self.obj.warning_if_ns_change_cause_fwzone_ineffective(result,
*keys,
**options)
return result
def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
if self.obj.is_pkey_zone_record(*keys):
entry_attrs[self.obj.primary_key.name] = [_dns_zone_record]
self.obj.postprocess_record(entry_attrs, **options)
return dn
def interactive_prompt_callback(self, kw):
try:
self.obj.has_cli_options(kw, self.no_option_msg, True)
except errors.OptionError:
pass
else:
# some record type entered, skip this helper
return
# get DNS record first so that the NotFound exception is raised
        # before the helper starts
dns_record = api.Command['dnsrecord_show'](kw['dnszoneidnsname'], kw['idnsname'])['result']
rec_types = [rec_type for rec_type in dns_record if rec_type in _record_attributes]
self.Backend.textui.print_plain(_("No option to modify specific record provided."))
# ask user for records to be removed
self.Backend.textui.print_plain(_(u'Current DNS record contents:\n'))
record_params = []
for attr in dns_record:
try:
param = self.params[attr]
except KeyError:
continue
if not isinstance(param, DNSRecord):
continue
record_params.append(param)
rec_type_content = u', '.join(dns_record[param.name])
self.Backend.textui.print_plain(u'%s: %s' % (param.label, rec_type_content))
self.Backend.textui.print_plain(u'')
# ask what records to remove
for param in record_params:
rec_values = list(dns_record[param.name])
for rec_value in dns_record[param.name]:
rec_values.remove(rec_value)
mod_value = self.Backend.textui.prompt_yesno(
_("Modify %(name)s '%(value)s'?") % dict(name=param.label, value=rec_value), default=False)
if mod_value is True:
user_options = param.prompt_parts(self, mod_dnsvalue=rec_value)
kw[param.name] = [rec_value]
kw.update(user_options)
if rec_values:
self.Backend.textui.print_plain(ngettext(
u'%(count)d %(type)s record skipped. Only one value per DNS record type can be modified at one time.',
u'%(count)d %(type)s records skipped. Only one value per DNS record type can be modified at one time.',
                            len(rec_values)) % dict(count=len(rec_values), type=param.rrtype))
break
@register()
class dnsrecord_delentry(LDAPDelete):
"""
Delete DNS record entry.
"""
msg_summary = _('Deleted record "%(value)s"')
NO_CLI = True
@register()
class dnsrecord_del(LDAPUpdate):
__doc__ = _('Delete DNS resource record.')
has_output = output.standard_multi_delete
no_option_msg = _('Neither --del-all nor options to delete a specific record provided.\n'\
"Command help may be consulted for all supported record types.")
takes_options = (
Flag('del_all',
default=False,
label=_('Delete all associated records'),
),
dnsrecord.structured_flag,
)
def get_options(self):
for option in super(dnsrecord_del, self).get_options():
if any(flag in option.flags for flag in \
('dnsrecord_part', 'dnsrecord_extra',)):
continue
elif option.name in ('rename', ):
# options only valid for dnsrecord-mod
continue
elif isinstance(option, DNSRecord):
yield option.clone(option_group=None)
continue
yield option
def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
assert isinstance(dn, DN)
try:
old_entry = ldap.get_entry(dn, _record_attributes)
except errors.NotFound:
self.obj.handle_not_found(*keys)
for attr in entry_attrs.keys():
if attr not in _record_attributes:
continue
if not isinstance(entry_attrs[attr], (tuple, list)):
vals = [entry_attrs[attr]]
else:
vals = entry_attrs[attr]
for val in vals:
try:
old_entry[attr].remove(val)
except (KeyError, ValueError):
try:
param = self.params[attr]
attr_name = unicode(param.label or param.name)
                    except KeyError:
attr_name = attr
raise errors.AttrValueNotFound(attr=attr_name, value=val)
entry_attrs[attr] = list(set(old_entry[attr]))
rrattrs = self.obj.updated_rrattrs(old_entry, entry_attrs)
self.obj.check_record_type_dependencies(keys, rrattrs)
del_all = False
if not self.obj.is_pkey_zone_record(*keys):
record_found = False
for attr in old_entry.keys():
if old_entry[attr]:
record_found = True
break
del_all = not record_found
# set del_all flag in context
# when the flag is enabled, the entire DNS record object is deleted
# in a post callback
context.del_all = del_all
context.dnsrecord_entry_mods = getattr(context, 'dnsrecord_entry_mods',
{})
context.dnsrecord_entry_mods[(keys[0], keys[1])] = entry_attrs.copy()
return dn
def execute(self, *keys, **options):
if options.get('del_all', False):
if self.obj.is_pkey_zone_record(*keys):
raise errors.ValidationError(
name='del_all',
error=_('Zone record \'%s\' cannot be deleted') \
% _dns_zone_record
)
result = self.obj.methods.delentry(*keys,
version=options['version'])
if self.api.env['wait_for_dns']:
entries = {(keys[0], keys[1]): None}
self.obj.wait_for_modified_entries(entries)
else:
result = super(dnsrecord_del, self).execute(*keys, **options)
result['value'] = pkey_to_value([keys[-1]], options)
if getattr(context, 'del_all', False) and not \
self.obj.is_pkey_zone_record(*keys):
result = self.obj.methods.delentry(*keys,
version=options['version'])
context.dnsrecord_entry_mods[(keys[0], keys[1])] = None
if self.api.env['wait_for_dns']:
self.obj.wait_for_modified_entries(context.dnsrecord_entry_mods)
if 'nsrecord' in options or options.get('del_all', False):
self.obj.warning_if_ns_change_cause_fwzone_ineffective(result,
*keys,
**options)
return result
def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
if self.obj.is_pkey_zone_record(*keys):
entry_attrs[self.obj.primary_key.name] = [_dns_zone_record]
self.obj.postprocess_record(entry_attrs, **options)
return dn
def args_options_2_entry(self, *keys, **options):
self.obj.has_cli_options(options, self.no_option_msg)
return super(dnsrecord_del, self).args_options_2_entry(*keys, **options)
def interactive_prompt_callback(self, kw):
if kw.get('del_all', False):
return
try:
self.obj.has_cli_options(kw, self.no_option_msg)
except errors.OptionError:
pass
else:
# some record type entered, skip this helper
return
# get DNS record first so that the NotFound exception is raised
        # before the helper starts
dns_record = api.Command['dnsrecord_show'](kw['dnszoneidnsname'], kw['idnsname'])['result']
rec_types = [rec_type for rec_type in dns_record if rec_type in _record_attributes]
self.Backend.textui.print_plain(_("No option to delete specific record provided."))
user_del_all = self.Backend.textui.prompt_yesno(_("Delete all?"), default=False)
if user_del_all is True:
kw['del_all'] = True
return
# ask user for records to be removed
self.Backend.textui.print_plain(_(u'Current DNS record contents:\n'))
present_params = []
for attr in dns_record:
try:
param = self.params[attr]
except KeyError:
continue
if not isinstance(param, DNSRecord):
continue
present_params.append(param)
rec_type_content = u', '.join(dns_record[param.name])
self.Backend.textui.print_plain(u'%s: %s' % (param.label, rec_type_content))
self.Backend.textui.print_plain(u'')
# ask what records to remove
for param in present_params:
deleted_values = []
for rec_value in dns_record[param.name]:
user_del_value = self.Backend.textui.prompt_yesno(
_("Delete %(name)s '%(value)s'?")
% dict(name=param.label, value=rec_value), default=False)
if user_del_value is True:
deleted_values.append(rec_value)
if deleted_values:
kw[param.name] = tuple(deleted_values)
@register()
class dnsrecord_show(LDAPRetrieve):
__doc__ = _('Display DNS resource.')
takes_options = LDAPRetrieve.takes_options + (
dnsrecord.structured_flag,
)
def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
if self.obj.is_pkey_zone_record(*keys):
entry_attrs[self.obj.primary_key.name] = [_dns_zone_record]
self.obj.postprocess_record(entry_attrs, **options)
return dn
@register()
class dnsrecord_find(LDAPSearch):
__doc__ = _('Search for DNS resources.')
takes_options = LDAPSearch.takes_options + (
dnsrecord.structured_flag,
)
def get_options(self):
for option in super(dnsrecord_find, self).get_options():
if any(flag in option.flags for flag in \
('dnsrecord_part', 'dnsrecord_extra',)):
continue
elif isinstance(option, DNSRecord):
yield option.clone(option_group=None)
continue
yield option
def pre_callback(self, ldap, filter, attrs_list, base_dn, scope, *args, **options):
assert isinstance(base_dn, DN)
# validate if zone is master zone
self.obj.check_zone(args[-2], **options)
filter = _create_idn_filter(self, ldap, *args, **options)
return (filter, base_dn, ldap.SCOPE_SUBTREE)
def post_callback(self, ldap, entries, truncated, *args, **options):
if entries:
zone_obj = self.api.Object[self.obj.parent_object]
zone_dn = zone_obj.get_dn(args[0])
if entries[0].dn == zone_dn:
entries[0][zone_obj.primary_key.name] = [_dns_zone_record]
for entry in entries:
self.obj.postprocess_record(entry, **options)
return truncated
@register()
class dns_resolve(Command):
__doc__ = _('Resolve a host name in DNS.')
has_output = output.standard_value
msg_summary = _('Found \'%(value)s\'')
takes_args = (
Str('hostname',
label=_('Hostname'),
),
)
def execute(self, *args, **options):
query=args[0]
if query.find(api.env.domain) == -1 and query.find('.') == -1:
query = '%s.%s.' % (query, api.env.domain)
if query[-1] != '.':
query = query + '.'
if not is_host_resolvable(query):
raise errors.NotFound(
reason=_('Host \'%(host)s\' not found') % {'host': query}
)
return dict(result=True, value=query)
@register()
class dns_is_enabled(Command):
"""
Checks if any of the servers has the DNS service enabled.
"""
NO_CLI = True
has_output = output.standard_value
base_dn = DN(('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'), api.env.basedn)
filter = '(&(objectClass=ipaConfigObject)(cn=DNS))'
def execute(self, *args, **options):
ldap = self.api.Backend.ldap2
dns_enabled = False
try:
ent = ldap.find_entries(filter=self.filter, base_dn=self.base_dn)
if len(ent):
dns_enabled = True
        except Exception:
pass
return dict(result=dns_enabled, value=pkey_to_value(None, options))
@register()
class dnsconfig(LDAPObject):
"""
DNS global configuration object
"""
object_name = _('DNS configuration options')
default_attributes = [
'idnsforwardpolicy', 'idnsforwarders', 'idnsallowsyncptr'
]
label = _('DNS Global Configuration')
label_singular = _('DNS Global Configuration')
takes_params = (
Str('idnsforwarders*',
_validate_bind_forwarder,
cli_name='forwarder',
label=_('Global forwarders'),
doc=_('Global forwarders. A custom port can be specified for each '
'forwarder using a standard format "IP_ADDRESS port PORT"'),
csv=True,
),
StrEnum('idnsforwardpolicy?',
cli_name='forward_policy',
label=_('Forward policy'),
doc=_('Global forwarding policy. Set to "none" to disable '
'any configured global forwarders.'),
values=(u'only', u'first', u'none'),
),
Bool('idnsallowsyncptr?',
cli_name='allow_sync_ptr',
label=_('Allow PTR sync'),
doc=_('Allow synchronization of forward (A, AAAA) and reverse (PTR) records'),
),
DeprecatedParam('idnszonerefresh?',
cli_name='zone_refresh',
label=_('Zone refresh interval'),
),
)
managed_permissions = {
'System: Write DNS Configuration': {
'non_object': True,
'ipapermright': {'write'},
'ipapermlocation': api.env.basedn,
'ipapermtarget': DN('cn=dns', api.env.basedn),
'ipapermtargetfilter': ['(objectclass=idnsConfigObject)'],
'ipapermdefaultattr': {
'idnsallowsyncptr', 'idnsforwarders', 'idnsforwardpolicy',
'idnspersistentsearch', 'idnszonerefresh'
},
'replaces': [
'(targetattr = "idnsforwardpolicy || idnsforwarders || idnsallowsyncptr || idnszonerefresh || idnspersistentsearch")(target = "ldap:///cn=dns,$SUFFIX")(version 3.0;acl "permission:Write DNS Configuration";allow (write) groupdn = "ldap:///cn=Write DNS Configuration,cn=permissions,cn=pbac,$SUFFIX";)',
],
'default_privileges': {'DNS Administrators', 'DNS Servers'},
},
'System: Read DNS Configuration': {
'non_object': True,
'ipapermright': {'read'},
'ipapermlocation': api.env.basedn,
'ipapermtarget': DN('cn=dns', api.env.basedn),
'ipapermtargetfilter': ['(objectclass=idnsConfigObject)'],
'ipapermdefaultattr': {
'objectclass',
'idnsallowsyncptr', 'idnsforwarders', 'idnsforwardpolicy',
'idnspersistentsearch', 'idnszonerefresh'
},
'default_privileges': {'DNS Administrators', 'DNS Servers'},
},
}
def get_dn(self, *keys, **kwargs):
if not dns_container_exists(self.api.Backend.ldap2):
raise errors.NotFound(reason=_('DNS is not configured'))
return DN(api.env.container_dns, api.env.basedn)
def get_dnsconfig(self, ldap):
entry = ldap.get_entry(self.get_dn(), None)
return entry
def postprocess_result(self, result):
if not any(param in result['result'] for param in self.params):
result['summary'] = unicode(_('Global DNS configuration is empty'))
@register()
class dnsconfig_mod(LDAPUpdate):
__doc__ = _('Modify global DNS configuration.')
def interactive_prompt_callback(self, kw):
        # show an informative message on the client side;
        # the server cannot send messages asynchronously
if kw.get('idnsforwarders', False):
self.Backend.textui.print_plain(
_("Server will check DNS forwarder(s)."))
self.Backend.textui.print_plain(
_("This may take some time, please wait ..."))
def execute(self, *keys, **options):
# test dnssec forwarders
forwarders = options.get('idnsforwarders')
result = super(dnsconfig_mod, self).execute(*keys, **options)
self.obj.postprocess_result(result)
if forwarders:
for forwarder in forwarders:
try:
validate_dnssec_global_forwarder(forwarder, log=self.log)
except DNSSECSignatureMissingError as e:
messages.add_message(
options['version'],
result, messages.DNSServerDoesNotSupportDNSSECWarning(
server=forwarder, error=e,
)
)
except EDNS0UnsupportedError as e:
messages.add_message(
options['version'],
result, messages.DNSServerDoesNotSupportEDNS0Warning(
server=forwarder, error=e,
)
)
except UnresolvableRecordError as e:
messages.add_message(
options['version'],
result, messages.DNSServerValidationWarning(
server=forwarder, error=e
)
)
return result
@register()
class dnsconfig_show(LDAPRetrieve):
__doc__ = _('Show the current global DNS configuration.')
def execute(self, *keys, **options):
result = super(dnsconfig_show, self).execute(*keys, **options)
self.obj.postprocess_result(result)
return result
@register()
class dnsforwardzone(DNSZoneBase):
"""
DNS Forward zone, container for resource records.
"""
object_name = _('DNS forward zone')
object_name_plural = _('DNS forward zones')
object_class = DNSZoneBase.object_class + ['idnsforwardzone']
label = _('DNS Forward Zones')
label_singular = _('DNS Forward Zone')
default_forward_policy = u'first'
    # managed_permissions: permissions were applied in the dnszone class; do
    # NOT add them here, they should not be applied twice.
def _warning_fw_zone_is_not_effective(self, result, *keys, **options):
fwzone = keys[-1]
_add_warning_fw_zone_is_not_effective(result, fwzone,
options['version'])
def _warning_if_forwarders_do_not_work(self, result, new_zone,
*keys, **options):
fwzone = keys[-1]
forwarders = options.get('idnsforwarders', [])
any_forwarder_work = False
for forwarder in forwarders:
try:
validate_dnssec_zone_forwarder_step1(forwarder, fwzone,
log=self.log)
except UnresolvableRecordError as e:
messages.add_message(
options['version'],
result, messages.DNSServerValidationWarning(
server=forwarder, error=e
)
)
except EDNS0UnsupportedError as e:
messages.add_message(
options['version'],
result, messages.DNSServerDoesNotSupportEDNS0Warning(
server=forwarder, error=e
)
)
else:
any_forwarder_work = True
if not any_forwarder_work:
# do not test DNSSEC validation if there is no valid forwarder
return
# resolve IP address of any DNS replica
# FIXME: https://fedorahosted.org/bind-dyndb-ldap/ticket/143
        # we currently should test all IPA DNS replicas, because DNSSEC
        # validation is configured just in named.conf per replica
ipa_dns_masters = [normalize_zone(x) for x in
api.Object.dnsrecord.get_dns_masters()]
if not ipa_dns_masters:
# something very bad happened, DNS is installed, but no IPA DNS
# servers available
self.log.error("No IPA DNS server can be found, but integrated DNS "
"is installed")
return
ipa_dns_ip = None
for rdtype in (dns.rdatatype.A, dns.rdatatype.AAAA):
try:
ans = dns.resolver.query(ipa_dns_masters[0], rdtype)
except dns.exception.DNSException:
continue
else:
ipa_dns_ip = str(ans.rrset.items[0])
break
if not ipa_dns_ip:
self.log.error("Cannot resolve %s hostname", ipa_dns_masters[0])
return
# sleep a bit, adding new zone to BIND from LDAP may take a while
if new_zone:
time.sleep(5)
# Test if IPA is able to receive replies from forwarders
try:
validate_dnssec_zone_forwarder_step2(ipa_dns_ip, fwzone,
log=self.log)
except DNSSECValidationError as e:
messages.add_message(
options['version'],
result, messages.DNSSECValidationFailingWarning(error=e)
)
except UnresolvableRecordError as e:
messages.add_message(
options['version'],
result, messages.DNSServerValidationWarning(
server=ipa_dns_ip, error=e
)
)
@register()
class dnsforwardzone_add(DNSZoneBase_add):
__doc__ = _('Create new DNS forward zone.')
def interactive_prompt_callback(self, kw):
        # show an informative message on the client side;
        # the server cannot send messages asynchronously
if kw.get('idnsforwarders', False):
self.Backend.textui.print_plain(
_("Server will check DNS forwarder(s)."))
self.Backend.textui.print_plain(
_("This may take some time, please wait ..."))
def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
assert isinstance(dn, DN)
dn = super(dnsforwardzone_add, self).pre_callback(ldap, dn,
entry_attrs, attrs_list, *keys, **options)
if 'idnsforwardpolicy' not in entry_attrs:
entry_attrs['idnsforwardpolicy'] = self.obj.default_forward_policy
if (not entry_attrs.get('idnsforwarders') and
entry_attrs['idnsforwardpolicy'] != u'none'):
raise errors.ValidationError(name=u'idnsforwarders',
error=_('Please specify forwarders.'))
return dn
def execute(self, *keys, **options):
result = super(dnsforwardzone_add, self).execute(*keys, **options)
self.obj._warning_fw_zone_is_not_effective(result, *keys, **options)
if options.get('idnsforwarders'):
self.obj._warning_if_forwarders_do_not_work(
result, True, *keys, **options)
return result
@register()
class dnsforwardzone_del(DNSZoneBase_del):
__doc__ = _('Delete DNS forward zone.')
msg_summary = _('Deleted DNS forward zone "%(value)s"')
@register()
class dnsforwardzone_mod(DNSZoneBase_mod):
__doc__ = _('Modify DNS forward zone.')
def interactive_prompt_callback(self, kw):
        # show an informative message on the client side;
        # the server cannot send messages asynchronously
if kw.get('idnsforwarders', False):
self.Backend.textui.print_plain(
_("Server will check DNS forwarder(s)."))
self.Backend.textui.print_plain(
_("This may take some time, please wait ..."))
def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
try:
entry = ldap.get_entry(dn)
except errors.NotFound:
self.obj.handle_not_found(*keys)
if not _check_entry_objectclass(entry, self.obj.object_class):
self.obj.handle_not_found(*keys)
policy = self.obj.default_forward_policy
forwarders = []
if 'idnsforwarders' in entry_attrs:
forwarders = entry_attrs['idnsforwarders']
elif 'idnsforwarders' in entry:
forwarders = entry['idnsforwarders']
if 'idnsforwardpolicy' in entry_attrs:
policy = entry_attrs['idnsforwardpolicy']
elif 'idnsforwardpolicy' in entry:
policy = entry['idnsforwardpolicy']
if not forwarders and policy != u'none':
raise errors.ValidationError(name=u'idnsforwarders',
error=_('Please specify forwarders.'))
return dn
def execute(self, *keys, **options):
result = super(dnsforwardzone_mod, self).execute(*keys, **options)
if options.get('idnsforwarders'):
self.obj._warning_if_forwarders_do_not_work(result, False, *keys,
**options)
return result
@register()
class dnsforwardzone_find(DNSZoneBase_find):
__doc__ = _('Search for DNS forward zones.')
@register()
class dnsforwardzone_show(DNSZoneBase_show):
__doc__ = _('Display information about a DNS forward zone.')
has_output_params = LDAPRetrieve.has_output_params + dnszone_output_params
@register()
class dnsforwardzone_disable(DNSZoneBase_disable):
__doc__ = _('Disable DNS Forward Zone.')
msg_summary = _('Disabled DNS forward zone "%(value)s"')
@register()
class dnsforwardzone_enable(DNSZoneBase_enable):
__doc__ = _('Enable DNS Forward Zone.')
msg_summary = _('Enabled DNS forward zone "%(value)s"')
def execute(self, *keys, **options):
result = super(dnsforwardzone_enable, self).execute(*keys, **options)
self.obj._warning_fw_zone_is_not_effective(result, *keys, **options)
return result
@register()
class dnsforwardzone_add_permission(DNSZoneBase_add_permission):
__doc__ = _('Add a permission for per-forward zone access delegation.')
@register()
class dnsforwardzone_remove_permission(DNSZoneBase_remove_permission):
__doc__ = _('Remove a permission for per-forward zone access delegation.')
|
h00dy/Diamond | refs/heads/master | src/collectors/nfs/test/testnfs.py | 29 | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from diamond.collector import Collector
from nfs import NfsCollector
################################################################################
class TestNfsCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NfsCollector', {
'interval': 1
})
self.collector = NfsCollector(config, None)
def test_import(self):
self.assertTrue(NfsCollector)
@patch('__builtin__.open')
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
    def test_should_open_proc_net_rpc_nfs(self, publish_mock, open_mock):
open_mock.return_value = StringIO('')
self.collector.collect()
open_mock.assert_called_once_with('/proc/net/rpc/nfs')
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_rhel5(self, publish_mock):
NfsCollector.PROC = self.getFixturePath('rhel5-1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
NfsCollector.PROC = self.getFixturePath('rhel5-2')
self.collector.collect()
metrics = {
'net.packets': 0.0,
'net.tcpcnt': 0.0,
'net.tcpconn': 0.0,
'net.udpcnt': 0.0,
'rpc.authrefrsh': 0.0,
'rpc.calls': 8042864.0,
'rpc.retrans': 0.0,
'v2.create': 0.0,
'v2.fsstat': 0.0,
'v2.getattr': 0.0,
'v2.link': 0.0,
'v2.lookup': 0.0,
'v2.mkdir': 0.0,
'v2.null': 0.0,
'v2.read': 0.0,
'v2.readdir': 0.0,
'v2.readlink': 0.0,
'v2.remove': 0.0,
'v2.rename': 0.0,
'v2.rmdir': 0.0,
'v2.root': 0.0,
'v2.setattr': 0.0,
'v2.symlink': 0.0,
'v2.wrcache': 0.0,
'v2.write': 0.0,
'v3.access': 40672.0,
'v3.commit': 0.0,
'v3.create': 91.0,
'v3.fsinfo': 0.0,
'v3.fsstat': 20830.0,
'v3.getattr': 162507.0,
'v3.link': 0.0,
'v3.lookup': 89.0,
'v3.mkdir': 0.0,
'v3.mknod': 0.0,
'v3.null': 0.0,
'v3.pathconf': 0.0,
'v3.read': 6093419.0,
'v3.readdir': 4002.0,
'v3.readdirplus': 0.0,
'v3.readlink': 0.0,
'v3.remove': 9.0,
'v3.rename': 0.0,
'v3.rmdir': 0.0,
'v3.setattr': 8640.0,
'v3.symlink': 0.0,
'v3.write': 1712605.0,
'v4.access': 0.0,
'v4.close': 0.0,
'v4.commit': 0.0,
'v4.confirm': 0.0,
'v4.create': 0.0,
'v4.delegreturn': 0.0,
'v4.fs_locations': 0.0,
'v4.fsinfo': 0.0,
'v4.getacl': 0.0,
'v4.getattr': 0.0,
'v4.link': 0.0,
'v4.lock': 0.0,
'v4.lockt': 0.0,
'v4.locku': 0.0,
'v4.lookup': 0.0,
'v4.lookup_root': 0.0,
'v4.null': 0.0,
'v4.open': 0.0,
'v4.open_conf': 0.0,
'v4.open_dgrd': 0.0,
'v4.open_noat': 0.0,
'v4.pathconf': 0.0,
'v4.read': 0.0,
'v4.readdir': 0.0,
'v4.readlink': 0.0,
'v4.rel_lkowner': 0.0,
'v4.remove': 0.0,
'v4.rename': 0.0,
'v4.renew': 0.0,
'v4.server_caps': 0.0,
'v4.setacl': 0.0,
'v4.setattr': 0.0,
'v4.setclntid': 0.0,
'v4.statfs': 0.0,
'v4.symlink': 0.0,
'v4.write': 0.0
}
self.assertPublishedMany(publish_mock, metrics)
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_rhel6(self, publish_mock):
NfsCollector.PROC = self.getFixturePath('rhel6-1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
NfsCollector.PROC = self.getFixturePath('rhel6-2')
self.collector.collect()
metrics = {
'net.packets': 0.0,
'net.tcpcnt': 0.0,
'net.tcpconn': 0.0,
'net.udpcnt': 0.0,
'rpc.authrefrsh': 32.0,
'rpc.calls': 32.0,
'rpc.retrans': 0.0,
'v2.create': 0.0,
'v2.fsstat': 0.0,
'v2.getattr': 0.0,
'v2.link': 0.0,
'v2.lookup': 0.0,
'v2.mkdir': 0.0,
'v2.null': 0.0,
'v2.read': 0.0,
'v2.readdir': 0.0,
'v2.readlink': 0.0,
'v2.remove': 0.0,
'v2.rename': 0.0,
'v2.rmdir': 0.0,
'v2.root': 0.0,
'v2.setattr': 0.0,
'v2.symlink': 0.0,
'v2.wrcache': 0.0,
'v2.write': 0.0,
'v3.access': 6.0,
'v3.commit': 0.0,
'v3.create': 0.0,
'v3.fsinfo': 0.0,
'v3.fsstat': 17.0,
'v3.getattr': 7.0,
'v3.link': 0.0,
'v3.lookup': 0.0,
'v3.mkdir': 0.0,
'v3.mknod': 0.0,
'v3.null': 0.0,
'v3.pathconf': 0.0,
'v3.read': 0.0,
'v3.readdir': 0.0,
'v3.readdirplus': 0.0,
'v3.readlink': 0.0,
'v3.remove': 0.0,
'v3.rename': 0.0,
'v3.rmdir': 0.0,
'v3.setattr': 1.0,
'v3.symlink': 0.0,
'v3.write': 1.0,
'v4.access': 0.0,
'v4.close': 0.0,
'v4.commit': 0.0,
'v4.confirm': 0.0,
'v4.create': 0.0,
'v4.create_ses': 0.0,
'v4.delegreturn': 0.0,
'v4.destroy_ses': 0.0,
'v4.ds_write': 0.0,
'v4.exchange_id': 0.0,
'v4.fs_locations': 0.0,
'v4.fsinfo': 0.0,
'v4.get_lease_t': 0.0,
'v4.getacl': 0.0,
'v4.getattr': 0.0,
'v4.getdevinfo': 0.0,
'v4.getdevlist': 0.0,
'v4.layoutcommit': 0.0,
'v4.layoutget': 0.0,
'v4.layoutreturn': 0.0,
'v4.link': 0.0,
'v4.lock': 0.0,
'v4.lockt': 0.0,
'v4.locku': 0.0,
'v4.lookup': 0.0,
'v4.lookup_root': 0.0,
'v4.null': 0.0,
'v4.open': 0.0,
'v4.open_conf': 0.0,
'v4.open_dgrd': 0.0,
'v4.open_noat': 0.0,
'v4.pathconf': 0.0,
'v4.read': 0.0,
'v4.readdir': 0.0,
'v4.readlink': 0.0,
'v4.reclaim_comp': 0.0,
'v4.rel_lkowner': 0.0,
'v4.remove': 0.0,
'v4.rename': 0.0,
'v4.renew': 0.0,
'v4.sequence': 0.0,
'v4.server_caps': 0.0,
'v4.setacl': 0.0,
'v4.setattr': 0.0,
'v4.setclntid': 0.0,
'v4.statfs': 0.0,
'v4.symlink': 0.0,
'v4.write': 0.0,
}
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
access-missouri/am-django-project | refs/heads/master | am/ls_importer/__init__.py | 1 | """
Import from Legiscan.
"""
|
dgellis90/nipype | refs/heads/master | nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py | 1 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..utils import MRIsCalc
def test_MRIsCalc_inputs():
input_map = dict(action=dict(argstr='%s',
mandatory=True,
position=-2,
),
args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file1=dict(argstr='%s',
mandatory=True,
position=-3,
),
in_file2=dict(argstr='%s',
mandatory=False,
position=-1,
xor=['in_float', 'in_int'],
),
in_float=dict(argstr='%f',
mandatory=False,
position=-1,
xor=['in_file2', 'in_int'],
),
in_int=dict(argstr='%d',
mandatory=False,
position=-1,
xor=['in_file2', 'in_float'],
),
out_file=dict(argstr='-o %s',
genfile=True,
mandatory=False,
),
subjects_dir=dict(),
terminal_output=dict(nohash=True,
),
)
inputs = MRIsCalc.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_MRIsCalc_outputs():
output_map = dict(out_file=dict(),
)
outputs = MRIsCalc.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
gtko/CouchPotatoServer | refs/heads/develop | libs/requests/packages/chardet/euctwprober.py | 2993 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCTWSMModel)
self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-TW"
|
mikebenfield/scikit-learn | refs/heads/master | examples/plot_multilabel.py | 236 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
|
jcfr/mystic | refs/heads/master | examples_UQ/TEST_surrogate_samples.py | 1 | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2009-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
DEBUG = False
PER_AI = True # if True, generate random_samples on each Ai
MCZERO = False # if True, McD[i] == 0 when STATUS[i] = SUCCESS
#######################################################################
# scaling and mpi info; also optimizer configuration parameters
# hard-wired: use DE solver, don't use mpi, F-F' calculation
# (similar to concentration.in)
#######################################################################
from TEST_surrogate_diam import * # model, limit
from mystic.math.stats import volume, prob_mass, mean, mcdiarmid_bound
from mystic.math.integrate import integrate as expectation_value
from mystic.math.samples import random_samples, sampled_pts, sampled_prob
from mystic.math.samples import alpha, _pof_given_samples as sampled_pof
from mystic.tools import wrap_bounds
def sampled_mean(pts,lb,ub):
from numpy import inf
f = wrap_bounds(model,lb,ub)
ave = 0; count = 0
for i in range(len(pts[0])):
Fx = f([pts[0][i],pts[1][i],pts[2][i]])
if Fx != -inf: # outside of bounds evaluates to -inf
ave += Fx
count += 1
if not count: return None #XXX: define 0/0 = None
ave = float(ave) / float(count)
return ave
def minF(x):
return scale * model(x)
def maxF(x):
return -scale * model(x)
#######################################################################
# the differential evolution optimizer
# (replaces the call to dakota)
#######################################################################
def optimize(cost,lb,ub):
from mystic.solvers import DifferentialEvolutionSolver2
from mystic.termination import CandidateRelativeTolerance as CRT
from mystic.strategy import Best1Exp
from mystic.monitors import VerboseMonitor, Monitor
from mystic.tools import random_seed
random_seed(123)
#stepmon = VerboseMonitor(100)
stepmon = Monitor()
evalmon = Monitor()
ndim = len(lb) # [(1 + RVend) - RVstart] + 1
solver = DifferentialEvolutionSolver2(ndim,npop)
solver.SetRandomInitialPoints(min=lb,max=ub)
solver.SetStrictRanges(min=lb,max=ub)
solver.SetEvaluationLimits(maxiter,maxfun)
solver.SetEvaluationMonitor(evalmon)
solver.SetGenerationMonitor(stepmon)
tol = convergence_tol
solver.Solve(cost,termination=CRT(tol,tol),strategy=Best1Exp, \
CrossProbability=crossover,ScalingFactor=percent_change)
solved = solver.bestSolution
#if DEBUG: print "solved: %s" % solved
diameter_squared = -solver.bestEnergy / scale #XXX: scale != 0
func_evals = solver.evaluations
return solved, diameter_squared, func_evals
#######################################################################
# loop over model parameters to calculate concentration of measure
# (similar to main.cc)
#######################################################################
def UQ(start,end,lower,upper):
params = []
diameters = []
function_evaluations = []
total_func_evals = 0
total_diameter = 0.0
for i in range(start,end+1):
lb = lower + [lower[i]]
ub = upper + [upper[i]]
# construct cost function and run optimizer
cost = costFactory(i)
# optimize, using no initial conditions
solved, subdiameter, func_evals = optimize(cost,lb,ub)
function_evaluations.append(func_evals)
diameters.append(subdiameter)
params.append(solved)
total_func_evals += function_evaluations[-1]
total_diameter += diameters[-1]
if DEBUG:
for solved in params:
print "solved: %s" % solved
print "subdiameters (squared): %s" % diameters
print "diameter (squared): %s" % total_diameter
print "func_evals: %s => %s" % (function_evaluations, total_func_evals)
return params, total_diameter, diameters
#######################################################################
# get solved_params, subdiameters, and prob_mass for a sliced cuboid
#######################################################################
PROBABILITY_MASS = []
SUB_DIAMETERS = []
TOTAL_DIAMETERS = []
SOLVED_PARAMETERS = []
NEW_SLICES = []
STATUS = [None]
TOTAL_CUTS = [0]
max_cuts = [9999]
SUCCESS = "S"
FAILURE = "F"
UNDERSAMPLED = "U"
def test_cuboids(lb,ub,RVstart,RVend,cuboid_volume):
probmass = []
subdiams = []
tot_diam = []
solved_p = []
# subdivisions
for i in range(len(lb)):
if DEBUG:
print "\n"
print " lower bounds: %s" % lb[i]
print " upper bounds: %s" % ub[i]
if i in NEW_SLICES or not NEW_SLICES:
subcuboid_volume = volume(lb[i],ub[i])
sub_prob_mass = prob_mass(subcuboid_volume,cuboid_volume)
probmass.append(sub_prob_mass)
if DEBUG: print " probability mass: %s" % sub_prob_mass
solved, diameter, subdiameters = UQ(RVstart,RVend,lb[i],ub[i])
solved_p.append(solved)
subdiams.append(subdiameters)
tot_diam.append(diameter)
else:
probmass.append(PROBABILITY_MASS[i])
if DEBUG: print " probability mass: %s" % PROBABILITY_MASS[i]
solved_p.append(SOLVED_PARAMETERS[i])
subdiams.append(SUB_DIAMETERS[i])
tot_diam.append(TOTAL_DIAMETERS[i])
return solved_p, subdiams, tot_diam, probmass
#######################################################################
# slice the cuboid
#######################################################################
def make_cut(lb,ub,RVstart,RVend,vol):
global STATUS #XXX: warning! global variable...
params, subdiams, diam, probmass = test_cuboids(lb,ub,RVstart,RVend,vol)
SOLVED_PARAMETERS, SUB_DIAMETERS = params, subdiams
TOTAL_DIAMETERS, PROBABILITY_MASS = diam, probmass
if DEBUG: print "\nSTATUS = %s" % STATUS
# get region with largest probability mass
# region = probmass.index(max(probmass))
# NEW_SLICES = [region,region+1]
newstatus = []
newslices = []
# find interesting regions (optimizer returns: solved, -Energy, f_evals)
for i in range(len(lb)):
if STATUS[i]: # status in [SUCCESS, FAILURE, UNDERSAMPLED]
newstatus.append(STATUS[i]) # previously determined to skip this region
elif not optimize(maxF,lb[i],ub[i])[1]: # if max A = 0, then 'failure'
newstatus.append(FAILURE) # mark as a failure region; skip
elif -(optimize(minF,lb[i],ub[i])[1]): # if min A > 0, then 'success'
newstatus.append(SUCCESS) # mark as a success region; skip
else:
newstatus.append(None) # each 'new' slice is a bisection
            newstatus.append(None) # ... thus appends TWO indicators
newslices.append(i)
ncut = 0
NEW_SLICES = []
for i in newslices:
# get direction with largest subdiameter
direction = subdiams[i].index(max(subdiams[i]))
# adjust for ub,lb expanded by n slices
region = i + ncut
# get the midpoint
cutvalue = 0.5 * ( ub[region][direction] + lb[region][direction] )
# modify bounds to include cut plane
l = lb[:region+1]
l += [lb[region][:direction] + [cutvalue] + lb[region][direction+1:]]
lb = l + lb[region+1:]
u = ub[:region]
u += [ub[region][:direction] + [cutvalue] + ub[region][direction+1:]]
ub = u + ub[region:]
# bean counting...
NEW_SLICES.append(region)
NEW_SLICES.append(region+1)
ncut += 1
TOTAL_CUTS[0] += 1
if TOTAL_CUTS[0] >= max_cuts[0]:
print "\nmaximum number of cuts performed."
max_cuts[0] = 0
            STATUS = newstatus[:i+1+ncut] + STATUS[i+1:] # partially use 'old' status
return lb,ub
STATUS = newstatus[:]
return lb,ub
#######################################################################
# rank, bounds, and restart information
# (similar to concentration.variables)
#######################################################################
if __name__ == '__main__':
from math import sqrt
function_name = "marc_surr"
lower_bounds = [60.0, 0.0, 2.1]
upper_bounds = [105.0, 30.0, 2.8]
RVstart = 0; RVend = 2
max_cut_iterations = 4 #NOTE: number of resulting subcuboids = cuts + 1
max_cuts[0] = 1 # maximum number of cuts
num_sample_points = 5 #NOTE: number of sample data points
print "...SETTINGS..."
print "npop = %s" % npop
print "maxiter = %s" % maxiter
print "maxfun = %s" % maxfun
print "convergence_tol = %s" % convergence_tol
print "crossover = %s" % crossover
print "percent_change = %s" % percent_change
print "..............\n\n"
print " model: f(x) = %s(x)" % function_name
RVmax = len(lower_bounds)
param_string = "["
for i in range(RVmax):
param_string += "'x%s'" % str(i+1)
if i == (RVmax - 1):
param_string += "]"
else:
param_string += ", "
print " parameters: %s" % param_string
# get diameter for entire cuboid
lb,ub = [lower_bounds],[upper_bounds]
cuboid_volume = volume(lb[0],ub[0])
params0, subdiams0, diam0, probmass0 = test_cuboids(lb,ub,RVstart,RVend,\
cuboid_volume)
SOLVED_PARAMETERS, SUB_DIAMETERS = params0, subdiams0
TOTAL_DIAMETERS, PROBABILITY_MASS = diam0, probmass0
if DEBUG: print "\nSTATUS = %s" % STATUS
if not DEBUG:
pts = random_samples(lb[0],ub[0])
pof = sampled_pof(model,pts)
print "Exact PoF: %s" % pof
# prepare new set of random samples (across entire domain) as 'data'
if not PER_AI:
pts = random_samples(lb[0],ub[0],num_sample_points)
for i in range(len(lb)):
print "\n"
print " lower bounds: %s" % lb[i]
print " upper bounds: %s" % ub[i]
for solved in params0[0]:
print "solved: %s" % solved
print "subdiameters (squared): %s" % subdiams0[0]
print "diameter (squared): %s" % diam0[0]
print " probability mass: %s" % probmass0[0]
expectation = expectation_value(model,lower_bounds,upper_bounds)
#print " expectation: %s" % expectation
mean_value = mean(expectation,cuboid_volume)
print " mean value: %s" % mean_value
if STATUS[0] == SUCCESS and MCZERO: #XXX: should be false, or we are done
mcdiarmid = 0.0
else:
mcdiarmid = mcdiarmid_bound(mean_value,sqrt(diam0[0]))
print "McDiarmid bound: %s" % mcdiarmid
# determine 'best' cuts to cuboid
for cut in range(max_cut_iterations):
if max_cuts[0]: #XXX: abort if max_cuts was set to zero
print "\n..... cut iteration #%s ....." % (cut+1)
lb,ub = make_cut(lb,ub,RVstart,RVend,cuboid_volume)
if DEBUG:
print "\n..... %s cuboids ....." % (cut+2) #XXX: ?; was max_cut_iterations+1
# get diameter for each subcuboid
params, subdiams, diam, probmass = test_cuboids(lb,ub,RVstart,RVend,\
cuboid_volume)
SOLVED_PARAMETERS, SUB_DIAMETERS = params, subdiams
TOTAL_DIAMETERS, PROBABILITY_MASS = diam, probmass
print "\nSTATUS = %s" % STATUS
if not DEBUG:
weighted_bound = []
sampled_bound = []
for i in range(len(lb)):
print "\n"
print " lower bounds: %s" % lb[i]
print " upper bounds: %s" % ub[i]
for solved in params[i]:
print "solved: %s" % solved
print "subdiameters (squared): %s" % subdiams[i]
print "diameter (squared): %s" % diam[i]
print " probability mass: %s" % probmass[i]
#calculate remainder of the statistics, McDiarmid for cube & subcuboids
subcuboid_volume = volume(lb[i],ub[i])
expect_value = expectation_value(model,lb[i],ub[i])
#print " expectation: %s" % expect_value
sub_mean_value = mean(expect_value,subcuboid_volume)
print " mean value: %s" % sub_mean_value
if STATUS[i] == SUCCESS and MCZERO:
sub_mcdiarmid = 0.0
else:
sub_mcdiarmid = mcdiarmid_bound(sub_mean_value,sqrt(diam[i]))
print "McDiarmid bound: %s" % sub_mcdiarmid
weighted_bound.append(probmass[i] * sub_mcdiarmid)
print "weighted McDiarmid: %s" % weighted_bound[-1]
# prepare new set of random samples (on each subcuboid) as 'data'
if PER_AI:
pts = random_samples(lb[i],ub[i],num_sample_points)
npts_i = sampled_pts(pts,lb[i],ub[i])
print "Number of sample points: %s" % npts_i
if not npts_i:
print "Warning, no sample points in bounded region"
alpha_i = 0.0 #FIXME: defining undefined alpha to be 0.0
else:
#alpha_i = alpha(npts_i,sub_mcdiarmid) #XXX: oops... was wrong
alpha_i = alpha(npts_i,sqrt(diam[i]))
print "alpha: %s" % alpha_i
s_prob = sampled_prob(pts,lb[i],ub[i])
print "Sampled probability mass: %s" % s_prob
s_mean = sampled_mean(pts,lb[i],ub[i])
if s_mean == None:
s_mean = 0.0 #FIXME: defining undefined means to be 0.0
print "Sampled mean value: %s" % s_mean
if STATUS[i] == SUCCESS and MCZERO:
samp_mcdiarmid = 0.0
else:
samp_mcdiarmid = mcdiarmid_bound((s_mean-alpha_i),sqrt(diam[i]))
print "Sampled McDiarmid bound: %s" % samp_mcdiarmid
if PER_AI: #XXX: 'cheat' by using probmass for uniform Ai
sampled_bound.append(probmass[i] * samp_mcdiarmid)
else:
sampled_bound.append(s_prob * samp_mcdiarmid)
print "weighted sampled McDiarmid: %s" % sampled_bound[-1]
# compare weighted to McDiarmid
print "\n\n.............."
p_mcdiarmid = probmass0[0] * mcdiarmid
print "McDiarmid: %s" % p_mcdiarmid
weighted = sum(weighted_bound)
print "weighted McDiarmid: %s" % weighted
try:
print "relative change: %s" % (weighted / p_mcdiarmid)
except ZeroDivisionError:
pass
#if not PER_AI:
sampled = sum(sampled_bound)
print "weighted sampled McDiarmid: %s" % sampled
print "\n.............."
print " sum probability mass: %s" % sum(probmass)
# EOF
|
Tithen-Firion/youtube-dl | refs/heads/master | setup.py | 8 | #!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
import os.path
import warnings
import sys
try:
from setuptools import setup, Command
setuptools_available = True
except ImportError:
from distutils.core import setup, Command
setuptools_available = False
from distutils.spawn import spawn
try:
# This will create an exe that needs Microsoft Visual C++ 2008
# Redistributable Package
import py2exe
except ImportError:
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
print('Cannot import py2exe', file=sys.stderr)
exit(1)
py2exe_options = {
'bundle_files': 1,
'compressed': 1,
'optimize': 2,
'dist_dir': '.',
'dll_excludes': ['w9xpopen.exe', 'crypt32.dll'],
}
# Get the version from youtube_dl/version.py without importing the package
exec(compile(open('youtube_dl/version.py').read(),
'youtube_dl/version.py', 'exec'))
DESCRIPTION = 'YouTube video downloader'
LONG_DESCRIPTION = 'Command-line program to download videos from YouTube.com and other video sites'
py2exe_console = [{
'script': './youtube_dl/__main__.py',
'dest_base': 'youtube-dl',
'version': __version__,
'description': DESCRIPTION,
'comments': LONG_DESCRIPTION,
'product_name': 'youtube-dl',
'product_version': __version__,
}]
py2exe_params = {
'console': py2exe_console,
'options': {'py2exe': py2exe_options},
'zipfile': None
}
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
params = py2exe_params
else:
files_spec = [
('etc/bash_completion.d', ['youtube-dl.bash-completion']),
('etc/fish/completions', ['youtube-dl.fish']),
('share/doc/youtube_dl', ['README.txt']),
('share/man/man1', ['youtube-dl.1'])
]
root = os.path.dirname(os.path.abspath(__file__))
data_files = []
for dirname, files in files_spec:
resfiles = []
for fn in files:
if not os.path.exists(fn):
warnings.warn('Skipping file %s since it is not present. Type make to build all automatically generated files.' % fn)
else:
resfiles.append(fn)
data_files.append((dirname, resfiles))
params = {
'data_files': data_files,
}
if setuptools_available:
params['entry_points'] = {'console_scripts': ['youtube-dl = youtube_dl:main']}
else:
params['scripts'] = ['bin/youtube-dl']
class build_lazy_extractors(Command):
description = 'Build the extractor lazy loading module'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
spawn(
[sys.executable, 'devscripts/make_lazy_extractors.py', 'youtube_dl/extractor/lazy_extractors.py'],
dry_run=self.dry_run,
)
setup(
name='youtube_dl',
version=__version__,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url='https://github.com/rg3/youtube-dl',
author='Ricardo Garcia',
author_email='[email protected]',
maintainer='Sergey M.',
maintainer_email='[email protected]',
packages=[
'youtube_dl',
'youtube_dl.extractor', 'youtube_dl.downloader',
'youtube_dl.postprocessor'],
# Provokes warning on most systems (why?!)
# test_suite = 'nose.collector',
# test_requires = ['nosetest'],
classifiers=[
'Topic :: Multimedia :: Video',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'License :: Public Domain',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
cmdclass={'build_lazy_extractors': build_lazy_extractors},
**params
)
|
umbrellaTech/pyaboleto | refs/heads/master | setup.py | 1 | # -*- coding: utf-8 -*-
from distutils.core import setup
setup(
name='pyaboleto',
packages=['pyaboleto'], # this must be the same as the name above
version='0.1',
    description='pyaboleto is a Python bank slip (boleto bancario) component that offers a set of classes to generate'
                ' the boleto data rather than the templates. pyaboleto is a rewrite of YaBoleto PHP, developed'
                ' by umbrellaTech.',
author='Kelson da Costa Medeiros',
author_email='[email protected]',
url='https://github.com/umbrellaTech/pyaboleto', # use the URL to the github repo
download_url='https://github.com/umbrellaTech/pyaboleto/tarball/0.1', # I'll explain this in a second
keywords=['boleto'], # arbitrary keywords
classifiers=[],
)
|
Apreche/Presentoh | refs/heads/master | utils/jinja2/testsuite/api.py | 5 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.api
~~~~~~~~~~~~~~~~~~~~
Tests the public API and related stuff.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import time
import tempfile
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, Undefined, DebugUndefined, \
StrictUndefined, UndefinedError, Template, meta, \
is_undefined, Template, DictLoader
from jinja2.utils import Cycler
env = Environment()
class ExtendedAPITestCase(JinjaTestCase):
def test_item_and_attribute(self):
from jinja2.sandbox import SandboxedEnvironment
for env in Environment(), SandboxedEnvironment():
# the |list is necessary for python3
tmpl = env.from_string('{{ foo.items()|list }}')
assert tmpl.render(foo={'items': 42}) == "[('items', 42)]"
tmpl = env.from_string('{{ foo|attr("items")()|list }}')
assert tmpl.render(foo={'items': 42}) == "[('items', 42)]"
tmpl = env.from_string('{{ foo["items"] }}')
assert tmpl.render(foo={'items': 42}) == '42'
def test_finalizer(self):
def finalize_none_empty(value):
if value is None:
value = u''
return value
env = Environment(finalize=finalize_none_empty)
tmpl = env.from_string('{% for item in seq %}|{{ item }}{% endfor %}')
assert tmpl.render(seq=(None, 1, "foo")) == '||1|foo'
tmpl = env.from_string('<{{ none }}>')
assert tmpl.render() == '<>'
def test_cycler(self):
items = 1, 2, 3
c = Cycler(*items)
for item in items + items:
assert c.current == item
assert c.next() == item
c.next()
assert c.current == 2
c.reset()
assert c.current == 1
def test_expressions(self):
expr = env.compile_expression("foo")
assert expr() is None
assert expr(foo=42) == 42
expr2 = env.compile_expression("foo", undefined_to_none=False)
assert is_undefined(expr2())
expr = env.compile_expression("42 + foo")
assert expr(foo=42) == 84
def test_template_passthrough(self):
t = Template('Content')
assert env.get_template(t) is t
assert env.select_template([t]) is t
assert env.get_or_select_template([t]) is t
assert env.get_or_select_template(t) is t
def test_autoescape_autoselect(self):
def select_autoescape(name):
if name is None or '.' not in name:
return False
return name.endswith('.html')
env = Environment(autoescape=select_autoescape,
loader=DictLoader({
'test.txt': '{{ foo }}',
'test.html': '{{ foo }}'
}))
t = env.get_template('test.txt')
assert t.render(foo='<foo>') == '<foo>'
t = env.get_template('test.html')
assert t.render(foo='<foo>') == '<foo>'
t = env.from_string('{{ foo }}')
assert t.render(foo='<foo>') == '<foo>'
class MetaTestCase(JinjaTestCase):
def test_find_undeclared_variables(self):
ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
x = meta.find_undeclared_variables(ast)
assert x == set(['bar'])
ast = env.parse('{% set foo = 42 %}{{ bar + foo }}'
'{% macro meh(x) %}{{ x }}{% endmacro %}'
'{% for item in seq %}{{ muh(item) + meh(seq) }}{% endfor %}')
x = meta.find_undeclared_variables(ast)
assert x == set(['bar', 'seq', 'muh'])
    def test_find_referenced_templates(self):
ast = env.parse('{% extends "layout.html" %}{% include helper %}')
i = meta.find_referenced_templates(ast)
assert i.next() == 'layout.html'
assert i.next() is None
assert list(i) == []
ast = env.parse('{% extends "layout.html" %}'
'{% from "test.html" import a, b as c %}'
'{% import "meh.html" as meh %}'
'{% include "muh.html" %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ['layout.html', 'test.html', 'meh.html', 'muh.html']
def test_find_included_templates(self):
ast = env.parse('{% include ["foo.html", "bar.html"] %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ['foo.html', 'bar.html']
ast = env.parse('{% include ("foo.html", "bar.html") %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ['foo.html', 'bar.html']
ast = env.parse('{% include ["foo.html", "bar.html", foo] %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ['foo.html', 'bar.html', None]
ast = env.parse('{% include ("foo.html", "bar.html", foo) %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ['foo.html', 'bar.html', None]
class StreamingTestCase(JinjaTestCase):
def test_basic_streaming(self):
tmpl = env.from_string("<ul>{% for item in seq %}<li>{{ loop.index "
"}} - {{ item }}</li>{%- endfor %}</ul>")
stream = tmpl.stream(seq=range(4))
self.assert_equal(stream.next(), '<ul>')
self.assert_equal(stream.next(), '<li>1 - 0</li>')
self.assert_equal(stream.next(), '<li>2 - 1</li>')
self.assert_equal(stream.next(), '<li>3 - 2</li>')
self.assert_equal(stream.next(), '<li>4 - 3</li>')
self.assert_equal(stream.next(), '</ul>')
def test_buffered_streaming(self):
tmpl = env.from_string("<ul>{% for item in seq %}<li>{{ loop.index "
"}} - {{ item }}</li>{%- endfor %}</ul>")
stream = tmpl.stream(seq=range(4))
stream.enable_buffering(size=3)
self.assert_equal(stream.next(), u'<ul><li>1 - 0</li><li>2 - 1</li>')
self.assert_equal(stream.next(), u'<li>3 - 2</li><li>4 - 3</li></ul>')
def test_streaming_behavior(self):
tmpl = env.from_string("")
stream = tmpl.stream()
assert not stream.buffered
stream.enable_buffering(20)
assert stream.buffered
stream.disable_buffering()
assert not stream.buffered
class UndefinedTestCase(JinjaTestCase):
def test_default_undefined(self):
env = Environment(undefined=Undefined)
self.assert_equal(env.from_string('{{ missing }}').render(), u'')
self.assert_raises(UndefinedError,
env.from_string('{{ missing.attribute }}').render)
self.assert_equal(env.from_string('{{ missing|list }}').render(), '[]')
self.assert_equal(env.from_string('{{ missing is not defined }}').render(), 'True')
self.assert_equal(env.from_string('{{ foo.missing }}').render(foo=42), '')
self.assert_equal(env.from_string('{{ not missing }}').render(), 'True')
def test_debug_undefined(self):
env = Environment(undefined=DebugUndefined)
self.assert_equal(env.from_string('{{ missing }}').render(), '{{ missing }}')
self.assert_raises(UndefinedError,
env.from_string('{{ missing.attribute }}').render)
self.assert_equal(env.from_string('{{ missing|list }}').render(), '[]')
self.assert_equal(env.from_string('{{ missing is not defined }}').render(), 'True')
self.assert_equal(env.from_string('{{ foo.missing }}').render(foo=42),
u"{{ no such element: int object['missing'] }}")
self.assert_equal(env.from_string('{{ not missing }}').render(), 'True')
def test_strict_undefined(self):
env = Environment(undefined=StrictUndefined)
self.assert_raises(UndefinedError, env.from_string('{{ missing }}').render)
self.assert_raises(UndefinedError, env.from_string('{{ missing.attribute }}').render)
self.assert_raises(UndefinedError, env.from_string('{{ missing|list }}').render)
self.assert_equal(env.from_string('{{ missing is not defined }}').render(), 'True')
self.assert_raises(UndefinedError, env.from_string('{{ foo.missing }}').render, foo=42)
self.assert_raises(UndefinedError, env.from_string('{{ not missing }}').render)
def test_indexing_gives_undefined(self):
t = Template("{{ var[42].foo }}")
self.assert_raises(UndefinedError, t.render, var=0)
def test_none_gives_proper_error(self):
try:
Environment().getattr(None, 'split')()
except UndefinedError, e:
assert e.message == "'None' has no attribute 'split'"
else:
assert False, 'expected exception'
def test_object_repr(self):
try:
Undefined(obj=42, name='upper')()
except UndefinedError, e:
assert e.message == "'int object' has no attribute 'upper'"
else:
assert False, 'expected exception'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ExtendedAPITestCase))
suite.addTest(unittest.makeSuite(MetaTestCase))
suite.addTest(unittest.makeSuite(StreamingTestCase))
suite.addTest(unittest.makeSuite(UndefinedTestCase))
return suite
|
jgabriellima/mining | refs/heads/master | mining/db/backends/__init__.py | 12133432 | |
MandyBohm3/Mandy | refs/heads/master | py/openage/convert/__init__.py | 12133432 | |
genome/flow-core | refs/heads/master | flow/configuration/settings/__init__.py | 12133432 | |
gangadharkadam/v6_erp | refs/heads/master | erpnext/stock/doctype/item_attribute_value/__init__.py | 12133432 | |
dex4er/django | refs/heads/1.6.x | tests/inline_formsets/__init__.py | 12133432 | |
juanyaw/python | refs/heads/develop | cpython/Lib/lib2to3/fixes/fix_raise.py | 203 | """Fixer for 'raise E, V, T'
raise -> raise
raise E -> raise E
raise E, V -> raise E(V)
raise E, V, T -> raise E(V).with_traceback(T)
raise E, None, T -> raise E.with_traceback(T)
raise (((E, E'), E''), E'''), V -> raise E(V)
raise "foo", V, T -> warns about string exceptions
CAVEATS:
1) "raise E, V" will be incorrectly translated if V is an exception
instance. The correct Python 3 idiom is
raise E from V
but since we can't detect instance-hood by syntax alone and since
any client code would have to be changed as well, we don't automate
this.
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Attr, ArgList, is_tuple
class FixRaise(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
"""
def transform(self, node, results):
syms = self.syms
exc = results["exc"].clone()
if exc.type == token.STRING:
msg = "Python 3 does not support string exceptions"
self.cannot_convert(node, msg)
return
# Python 2 supports
# raise ((((E1, E2), E3), E4), E5), V
# as a synonym for
# raise E1, V
# Since Python 3 will not support this, we recurse down any tuple
# literals, always taking the first element.
if is_tuple(exc):
while is_tuple(exc):
# exc.children[1:-1] is the unparenthesized tuple
# exc.children[1].children[0] is the first element of the tuple
exc = exc.children[1].children[0].clone()
exc.prefix = " "
if "val" not in results:
# One-argument raise
new = pytree.Node(syms.raise_stmt, [Name("raise"), exc])
new.prefix = node.prefix
return new
val = results["val"].clone()
if is_tuple(val):
args = [c.clone() for c in val.children[1:-1]]
else:
val.prefix = ""
args = [val]
if "tb" in results:
tb = results["tb"].clone()
tb.prefix = ""
e = exc
# If there's a traceback and None is passed as the value, then don't
# add a call, since the user probably just wants to add a
# traceback. See issue #9661.
if val.type != token.NAME or val.value != "None":
e = Call(exc, args)
with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
new = pytree.Node(syms.simple_stmt, [Name("raise")] + with_tb)
new.prefix = node.prefix
return new
else:
return pytree.Node(syms.raise_stmt,
[Name("raise"), Call(exc, args)],
prefix=node.prefix)
|
nhynes/neon | refs/heads/master | tests/test_mergebroadcast_layer.py | 2 | # ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Convolution layer tests
"""
import numpy as np
from neon import NervanaObject
from neon.backends import gen_backend
from neon.layers import Sequential, Conv, Pooling, MergeBroadcast, Affine
from neon.initializers.initializer import Gaussian, Constant
from neon.transforms import Rectlin, Softmax
init1 = Gaussian(scale=0.01)
relu = Rectlin()
bias = Constant(0)
common = dict(activation=relu, init=init1, bias=bias)
commonp1 = dict(activation=relu, init=init1, bias=bias, padding=1)
commonp3s2 = dict(activation=relu, init=init1, bias=bias, padding=3, strides=2)
pool3s1p1 = dict(fshape=3, padding=1, strides=1)
batch_size = 64
def fshape(rs, k):
return (rs, rs, k)
def inception(kvals, name="i"):
(p1, p2, p3) = kvals
branch1 = [Sequential([Conv(fshape(1, p1[0]), **common)])] if p1[0] else []
branch2 = [Sequential([Conv(fshape(1, p2[0]), **common),
Conv(fshape(3, p2[1]), **commonp1)])]
branch3 = [Sequential([Pooling(op=p3[0], **pool3s1p1)] + (
[Conv(fshape(1, p3[1]), **common)] if p3[1] else []))]
partitions = branch1 + branch2 + branch3
return [MergeBroadcast(layers=partitions, merge="depth")]
def inception_bare(ref_module, kvals, name="i"):
(p1, p2, p3) = kvals
branch1 = [Conv(fshape(1, p1[0]), **common)] if p1[0] else []
branch2 = [Conv(fshape(1, p2[0]), **common), Conv(fshape(3, p2[1]), **commonp1)]
branch3 = [Pooling(op=p3[0], **pool3s1p1)] + (
[Conv(fshape(1, p3[1]), **common)] if p3[1] else [])
branch1 = Sequential(branch1)
branch2 = Sequential(branch2)
branch3 = Sequential(branch3)
(branch1_ref, branch2_ref, branch3_ref) = ref_module[0].layers
if p1[0]:
for ll, lr in zip(branch1.layers, branch1_ref.layers):
if ll.has_params:
ll.set_params(lr.W.get())
for ll, lr in zip(branch2.layers, branch2_ref.layers):
if ll.has_params:
ll.set_params(lr.W.get())
if p3[1]:
for ll, lr in zip(branch3.layers, branch3_ref.layers):
if ll.has_params:
ll.set_params(lr.W.get())
return (branch1.layers, branch2.layers, branch3.layers)
def main_branch():
return [Conv(fshape(7, 64), **commonp3s2),
Pooling(fshape=3, strides=2, padding=1, op="max"),
Conv(fshape(3, 192), **commonp1),
Pooling(fshape=3, strides=2, padding=1, op="max")]
def top_branch():
return [Pooling(fshape=7, strides=1, op="avg"),
Affine(nout=100, init=init1, activation=Softmax(), bias=bias)]
def test_branch_model():
NervanaObject.be = gen_backend("gpu", batch_size=64)
be = NervanaObject.be
main1 = main_branch()
i1 = inception([(32,), (32, 32), ('max', 16)])
top = top_branch()
neon_layer = Sequential(main1 + i1 + top)
inshape = (3, 224, 224)
insize = np.prod(inshape)
inpa = np.random.random((insize, batch_size))
neon_layer.configure(inshape)
inp = neon_layer.be.array(inpa)
neon_layer.allocate()
    print(neon_layer.nested_str())
neon_layer.layers[0].prev_layer = True
neon_layer.allocate_deltas()
neon_layer.layers[0].set_deltas([be.iobuf(inshape)])
neon_out = neon_layer.fprop(inp).get()
# Now make the reference pathways:
main_trunk2 = Sequential(main_branch())
main_trunk2.configure(inshape)
main2 = main_trunk2.layers
main2[0].prev_layer = True
main2[0].set_deltas([be.iobuf(inshape)])
(b1, b2, b3) = inception_bare(i1, [(32,), (32, 32), ('max', 16)])
for bb in (b1, b2, b3):
oshape = inshape
for ll in main2 + bb:
oshape = ll.configure(oshape)
main1_trunk = neon_layer.layers[:8]
for ll, lo in zip(main2, main1_trunk):
if ll.has_params:
ll.set_params(lo.W.get())
ll.allocate()
ll.set_deltas([be.iobuf(ll.in_shape)])
for bb in (b1, b2, b3):
for ll in bb:
ll.allocate()
ll.set_deltas([be.iobuf(ll.in_shape)])
# Create the combined output buffer
merge_output = be.empty_like(neon_layer.layers[8].outputs)
x = inp
for ll in main2:
x = ll.fprop(x)
start = 0
for bb in (b1, b2, b3):
xb = x
for ll in bb:
xb = ll.fprop(xb)
end = start + xb.shape[0]
merge_output[start:end] = xb
start = end
x = merge_output
top_trunk = Sequential(top).layers
for ll in top_trunk:
x = ll.fprop(x)
neon_out_ref = x.get()
difference = neon_out_ref - neon_out
assert np.max(np.abs(difference)) < 1e-7
    print(np.max(np.abs(difference)))
    print("Beginning Back prop")
erra = np.random.random(neon_out.shape)
err = be.array(erra)
for ll in reversed(neon_layer.layers[8:]):
err = ll.bprop(err)
neon_deltas = err.get()
for bb, errb in zip((b1, b2, b3), neon_layer.layers[8].error_views):
for ll in reversed(bb):
errb = ll.bprop(errb)
# Now sum up the deltas at the root of the branch layer and compare
ref_deltas = be.zeros_like(b1[0].deltas)
ref_deltas[:] = b1[0].deltas + b2[0].deltas + b3[0].deltas
neon_ref_deltas = ref_deltas.get()
difference = neon_deltas - neon_ref_deltas
print np.max(np.abs(difference))
assert np.max(np.abs(difference)) < 1e-8
def test_branch_model_fork():
from neon.layers import BranchNode, Tree
NervanaObject.be = gen_backend("gpu", batch_size=64)
be = NervanaObject.be
bnode = BranchNode()
i1 = inception([(32,), (32, 32), ('max', 16)])
top1 = top_branch()
top2 = top_branch()
p1 = Sequential(main_branch() + [bnode, i1] + top1)
p2 = [bnode] + top2
alpha2 = 0.3
neon_layer = Tree([p1, p2], alphas=[1.0, alpha2])
inshape = (3, 224, 224)
insize = np.prod(inshape)
inpa = np.random.random((insize, batch_size))
neon_layer.configure(inshape)
inp = neon_layer.be.array(inpa)
neon_layer.allocate()
    print(neon_layer.nested_str())
neon_layer.layers[0].layers[0].prev_layer = True
neon_layer.allocate_deltas()
neon_layer.layers[0].layers[0].set_deltas([be.iobuf(inshape)])
neon_out_dev = neon_layer.fprop(inp)
neon_out = [d.get() for d in neon_out_dev]
# Now make the reference pathways:
main_trunk2 = Sequential(main_branch())
main_trunk2.configure(inshape)
main2 = main_trunk2.layers
main2[0].prev_layer = True
main2[0].set_deltas([be.iobuf(inshape)])
branch2 = Sequential(top_branch())
lbranch2 = branch2.layers
(b1, b2, b3) = inception_bare(i1, [(32,), (32, 32), ('max', 16)])
for bb in (b1, b2, b3, lbranch2):
oshape = inshape
for ll in main2 + bb:
oshape = ll.configure(oshape)
main1_trunk = neon_layer.layers[0].layers[:8]
for ll, lo in zip(main2, main1_trunk):
if ll.has_params:
ll.set_params(lo.W.get())
ll.allocate()
ll.set_deltas([be.iobuf(ll.in_shape)])
for ll, lo in zip(lbranch2, neon_layer.layers[1].layers[1:]):
if ll.has_params:
ll.set_params(lo.W.get())
for bb in (b1, b2, b3, lbranch2):
for ll in bb:
ll.allocate()
ll.set_deltas([be.iobuf(ll.in_shape)])
# Create the combined output buffer
merge_output = be.empty_like(neon_layer.layers[0].layers[9].outputs)
x = inp
for ll in main2:
x = ll.fprop(x)
main2_out = x
start = 0
for bb in (b1, b2, b3):
xb = main2_out
for ll in bb:
xb = ll.fprop(xb)
end = start + xb.shape[0]
merge_output[start:end] = xb
start = end
x = merge_output
top_trunk = Sequential(top1).layers
for ll in top_trunk:
x = ll.fprop(x)
neon_out_ref = x.get()
difference = neon_out_ref - neon_out[0]
assert np.max(np.abs(difference)) < 1e-7
    print(np.max(np.abs(difference)))
# Now do second branch
neon_out_ref2 = branch2.fprop(main2_out).get()
difference = neon_out_ref2 - neon_out[1]
assert np.max(np.abs(difference)) < 1e-7
    print(np.max(np.abs(difference)))
    print("Beginning Back prop")
erra = [np.random.random(d.shape) for d in neon_out]
err = [be.array(d) for d in erra]
neon_layer.layers[0].layers[0].deltas = be.iobuf(inshape)
neon_layer.bprop(err)
bottom_neon_deltas = neon_layer.layers[0].layers[1].deltas.get()
middle_neon_deltas = neon_layer.layers[1].layers[1].deltas.get()
err0 = err[0]
for ll in reversed(top_trunk):
err0 = ll.bprop(err0)
err1 = err[1]
for ll in reversed(lbranch2):
err1 = ll.bprop(err1)
for bb, errb in zip((b1, b2, b3), neon_layer.layers[0].layers[-5].error_views):
for ll in reversed(bb):
errb = ll.bprop(errb)
# Now sum up the deltas at the root of the branch layer and compare
ref_deltas = be.zeros_like(b1[0].deltas)
ref_deltas[:] = b1[0].deltas + b2[0].deltas + b3[0].deltas + alpha2 * lbranch2[0].deltas
neon_ref_deltas = ref_deltas.get()
difference = middle_neon_deltas - neon_ref_deltas
    print(np.max(np.abs(difference)))
assert np.max(np.abs(difference)) < 1e-8
x = ref_deltas
main2[0].deltas = be.iobuf(inshape)
for ll in reversed(main2):
x = ll.bprop(x)
bottom_neon_ref_deltas = main2[1].deltas.get()
difference = bottom_neon_deltas - bottom_neon_ref_deltas
    print(np.max(np.abs(difference)))
assert np.max(np.abs(difference)) < 1e-8
if __name__ == '__main__':
test_branch_model_fork()
|
allotria/intellij-community | refs/heads/master | python/testData/joinLines/DictLCurly.py | 83 | a =<caret> {
'a': 1,
'b': 2
}
|
sandeepdsouza93/TensorFlow-15712 | refs/heads/master | tensorflow/python/framework/errors_impl.py | 28 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exception types for TensorFlow errors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import traceback
import warnings
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util import compat
class OpError(Exception):
"""A generic error that is raised when TensorFlow execution fails.
Whenever possible, the session will raise a more specific subclass
of `OpError` from the `tf.errors` module.
@@op
@@node_def
"""
def __init__(self, node_def, op, message, error_code):
"""Creates a new `OpError` indicating that a particular op failed.
Args:
node_def: The `node_def_pb2.NodeDef` proto representing the op that
failed, if known; otherwise None.
op: The `ops.Operation` that failed, if known; otherwise None.
message: The message string describing the failure.
error_code: The `error_codes_pb2.Code` describing the error.
"""
super(OpError, self).__init__()
self._message = message
self._node_def = node_def
self._op = op
self._error_code = error_code
@property
def message(self):
"""The error message that describes the error."""
return self._message
@property
def op(self):
"""The operation that failed, if known.
*N.B.* If the failed op was synthesized at runtime, e.g. a `Send`
or `Recv` op, there will be no corresponding
[`Operation`](../../api_docs/python/framework.md#Operation)
object. In that case, this will return `None`, and you should
instead use the [`OpError.node_def`](#OpError.node_def) to
discover information about the op.
Returns:
The `Operation` that failed, or None.
"""
return self._op
@property
def error_code(self):
"""The integer error code that describes the error."""
return self._error_code
@property
def node_def(self):
"""The `NodeDef` proto representing the op that failed."""
return self._node_def
def __str__(self):
if self._op is not None:
output = ["%s\n\nCaused by op %r, defined at:\n" % (self.message,
self._op.name,)]
curr_traceback_list = traceback.format_list(self._op.traceback)
output.extend(curr_traceback_list)
# pylint: disable=protected-access
original_op = self._op._original_op
# pylint: enable=protected-access
while original_op is not None:
output.append(
"\n...which was originally created as op %r, defined at:\n"
% (original_op.name,))
prev_traceback_list = curr_traceback_list
curr_traceback_list = traceback.format_list(original_op.traceback)
# Attempt to elide large common subsequences of the subsequent
# stack traces.
#
# TODO(mrry): Consider computing the actual longest common subsequence.
is_eliding = False
elide_count = 0
last_elided_line = None
for line, line_in_prev in zip(curr_traceback_list, prev_traceback_list):
if line == line_in_prev:
if is_eliding:
elide_count += 1
last_elided_line = line
else:
output.append(line)
is_eliding = True
elide_count = 0
else:
if is_eliding:
if elide_count > 0:
output.extend(
["[elided %d identical lines from previous traceback]\n"
% (elide_count - 1,), last_elided_line])
is_eliding = False
            output.append(line)
# pylint: disable=protected-access
original_op = original_op._original_op
# pylint: enable=protected-access
output.append("\n%s (see above for traceback): %s\n" %
(type(self).__name__, self.message))
return "".join(output)
else:
return self.message
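# Hedged usage sketch (not part of this module): callers typically catch these
# exceptions around Session.run(); `sess` and `train_op` below are assumed names.
#
#   try:
#       sess.run(train_op)
#   except OpError as e:
#       print(e.error_code, e.message)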
OK = error_codes_pb2.OK
CANCELLED = error_codes_pb2.CANCELLED
UNKNOWN = error_codes_pb2.UNKNOWN
INVALID_ARGUMENT = error_codes_pb2.INVALID_ARGUMENT
DEADLINE_EXCEEDED = error_codes_pb2.DEADLINE_EXCEEDED
NOT_FOUND = error_codes_pb2.NOT_FOUND
ALREADY_EXISTS = error_codes_pb2.ALREADY_EXISTS
PERMISSION_DENIED = error_codes_pb2.PERMISSION_DENIED
UNAUTHENTICATED = error_codes_pb2.UNAUTHENTICATED
RESOURCE_EXHAUSTED = error_codes_pb2.RESOURCE_EXHAUSTED
FAILED_PRECONDITION = error_codes_pb2.FAILED_PRECONDITION
ABORTED = error_codes_pb2.ABORTED
OUT_OF_RANGE = error_codes_pb2.OUT_OF_RANGE
UNIMPLEMENTED = error_codes_pb2.UNIMPLEMENTED
INTERNAL = error_codes_pb2.INTERNAL
UNAVAILABLE = error_codes_pb2.UNAVAILABLE
DATA_LOSS = error_codes_pb2.DATA_LOSS
# pylint: disable=line-too-long
class CancelledError(OpError):
"""Raised when an operation or step is cancelled.
For example, a long-running operation (e.g.
[`queue.enqueue()`](../../api_docs/python/io_ops.md#QueueBase.enqueue) may be
cancelled by running another operation (e.g.
[`queue.close(cancel_pending_enqueues=True)`](../../api_docs/python/io_ops.md#QueueBase.close),
or by [closing the session](../../api_docs/python/client.md#Session.close).
A step that is running such a long-running operation will fail by raising
`CancelledError`.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `CancelledError`."""
super(CancelledError, self).__init__(node_def, op, message, CANCELLED)
# pylint: enable=line-too-long
class UnknownError(OpError):
"""Unknown error.
An example of where this error may be returned is if a Status value
received from another address space belongs to an error-space that
is not known to this address space. Also errors raised by APIs that
do not return enough error information may be converted to this
error.
@@__init__
"""
def __init__(self, node_def, op, message, error_code=UNKNOWN):
"""Creates an `UnknownError`."""
super(UnknownError, self).__init__(node_def, op, message, error_code)
class InvalidArgumentError(OpError):
"""Raised when an operation receives an invalid argument.
  This may occur, for example, if an operation receives an input
tensor that has an invalid value or shape. For example, the
[`tf.matmul()`](../../api_docs/python/math_ops.md#matmul) op will raise this
error if it receives an input that is not a matrix, and the
[`tf.reshape()`](../../api_docs/python/array_ops.md#reshape) op will raise
this error if the new shape does not match the number of elements in the input
tensor.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `InvalidArgumentError`."""
super(InvalidArgumentError, self).__init__(node_def, op, message,
INVALID_ARGUMENT)
class DeadlineExceededError(OpError):
"""Raised when a deadline expires before an operation could complete.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `DeadlineExceededError`."""
super(DeadlineExceededError, self).__init__(node_def, op, message,
DEADLINE_EXCEEDED)
class NotFoundError(OpError):
"""Raised when a requested entity (e.g., a file or directory) was not found.
For example, running the
[`tf.WholeFileReader.read()`](../../api_docs/python/io_ops.md#WholeFileReader)
operation could raise `NotFoundError` if it receives the name of a file that
does not exist.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `NotFoundError`."""
super(NotFoundError, self).__init__(node_def, op, message, NOT_FOUND)
class AlreadyExistsError(OpError):
"""Raised when an entity that we attempted to create already exists.
For example, running an operation that saves a file
(e.g. [`tf.train.Saver.save()`](../../api_docs/python/train.md#Saver.save))
could potentially raise this exception if an explicit filename for an
existing file was passed.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `AlreadyExistsError`."""
super(AlreadyExistsError, self).__init__(node_def, op, message,
ALREADY_EXISTS)
class PermissionDeniedError(OpError):
"""Raised when the caller does not have permission to run an operation.
For example, running the
[`tf.WholeFileReader.read()`](../../api_docs/python/io_ops.md#WholeFileReader)
operation could raise `PermissionDeniedError` if it receives the name of a
file for which the user does not have the read file permission.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `PermissionDeniedError`."""
super(PermissionDeniedError, self).__init__(node_def, op, message,
PERMISSION_DENIED)
class UnauthenticatedError(OpError):
"""The request does not have valid authentication credentials.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnauthenticatedError`."""
super(UnauthenticatedError, self).__init__(node_def, op, message,
UNAUTHENTICATED)
class ResourceExhaustedError(OpError):
"""Some resource has been exhausted.
For example, this error might be raised if a per-user quota is
exhausted, or perhaps the entire file system is out of space.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `ResourceExhaustedError`."""
super(ResourceExhaustedError, self).__init__(node_def, op, message,
RESOURCE_EXHAUSTED)
class FailedPreconditionError(OpError):
"""Operation was rejected because the system is not in a state to execute it.
This exception is most commonly raised when running an operation
that reads a [`tf.Variable`](../../api_docs/python/state_ops.md#Variable)
before it has been initialized.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `FailedPreconditionError`."""
super(FailedPreconditionError, self).__init__(node_def, op, message,
FAILED_PRECONDITION)
class AbortedError(OpError):
"""The operation was aborted, typically due to a concurrent action.
For example, running a
[`queue.enqueue()`](../../api_docs/python/io_ops.md#QueueBase.enqueue)
operation may raise `AbortedError` if a
[`queue.close()`](../../api_docs/python/io_ops.md#QueueBase.close) operation
previously ran.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `AbortedError`."""
super(AbortedError, self).__init__(node_def, op, message, ABORTED)
class OutOfRangeError(OpError):
"""Raised when an operation iterates past the valid input range.
This exception is raised in "end-of-file" conditions, such as when a
[`queue.dequeue()`](../../api_docs/python/io_ops.md#QueueBase.dequeue)
operation is blocked on an empty queue, and a
[`queue.close()`](../../api_docs/python/io_ops.md#QueueBase.close)
operation executes.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `OutOfRangeError`."""
super(OutOfRangeError, self).__init__(node_def, op, message,
OUT_OF_RANGE)
class UnimplementedError(OpError):
"""Raised when an operation has not been implemented.
Some operations may raise this error when passed otherwise-valid
arguments that it does not currently support. For example, running
the [`tf.nn.max_pool()`](../../api_docs/python/nn.md#max_pool) operation
would raise this error if pooling was requested on the batch dimension,
because this is not yet supported.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnimplementedError`."""
super(UnimplementedError, self).__init__(node_def, op, message,
UNIMPLEMENTED)
class InternalError(OpError):
"""Raised when the system experiences an internal error.
This exception is raised when some invariant expected by the runtime
has been broken. Catching this exception is not recommended.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `InternalError`."""
super(InternalError, self).__init__(node_def, op, message, INTERNAL)
class UnavailableError(OpError):
"""Raised when the runtime is currently unavailable.
This exception is not currently used.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates an `UnavailableError`."""
super(UnavailableError, self).__init__(node_def, op, message,
UNAVAILABLE)
class DataLossError(OpError):
"""Raised when unrecoverable data loss or corruption is encountered.
For example, this may be raised by running a
[`tf.WholeFileReader.read()`](../../api_docs/python/io_ops.md#WholeFileReader)
operation, if the file is truncated while it is being read.
@@__init__
"""
def __init__(self, node_def, op, message):
"""Creates a `DataLossError`."""
super(DataLossError, self).__init__(node_def, op, message, DATA_LOSS)
_CODE_TO_EXCEPTION_CLASS = {
CANCELLED: CancelledError,
UNKNOWN: UnknownError,
INVALID_ARGUMENT: InvalidArgumentError,
DEADLINE_EXCEEDED: DeadlineExceededError,
NOT_FOUND: NotFoundError,
ALREADY_EXISTS: AlreadyExistsError,
PERMISSION_DENIED: PermissionDeniedError,
UNAUTHENTICATED: UnauthenticatedError,
RESOURCE_EXHAUSTED: ResourceExhaustedError,
FAILED_PRECONDITION: FailedPreconditionError,
ABORTED: AbortedError,
OUT_OF_RANGE: OutOfRangeError,
UNIMPLEMENTED: UnimplementedError,
INTERNAL: InternalError,
UNAVAILABLE: UnavailableError,
DATA_LOSS: DataLossError,
}
_EXCEPTION_CLASS_TO_CODE = dict((
(class_, code) for (code, class_) in _CODE_TO_EXCEPTION_CLASS.items()))
def exception_type_from_error_code(error_code):
return _CODE_TO_EXCEPTION_CLASS[error_code]
def error_code_from_exception_type(cls):
return _EXCEPTION_CLASS_TO_CODE[cls]
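# Illustration (assumption): the two lookup tables above are inverses, so the
# conversions round-trip, e.g.
#   exception_type_from_error_code(NOT_FOUND) is NotFoundError   # True
#   error_code_from_exception_type(NotFoundError) == NOT_FOUND   # True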
def _make_specific_exception(node_def, op, message, error_code):
try:
exc_type = exception_type_from_error_code(error_code)
return exc_type(node_def, op, message)
except KeyError:
warnings.warn("Unknown error code: %d" % error_code)
return UnknownError(node_def, op, message, error_code)
@contextlib.contextmanager
def raise_exception_on_not_ok_status():
try:
status = pywrap_tensorflow.TF_NewStatus()
yield status
if pywrap_tensorflow.TF_GetCode(status) != 0:
raise _make_specific_exception(
None, None,
compat.as_text(pywrap_tensorflow.TF_Message(status)),
pywrap_tensorflow.TF_GetCode(status))
finally:
pywrap_tensorflow.TF_DeleteStatus(status)
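# Hedged usage sketch (mirrors call sites elsewhere in TensorFlow; the C API
# function name below is a placeholder, not a real symbol):
#
#   with raise_exception_on_not_ok_status() as status:
#       pywrap_tensorflow.SomeCApiCall(args, status)  # hypothetical call
#
# On exit, a non-OK status is converted into the matching OpError subclass.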
|
pombredanne/seqlearn | refs/heads/master | seqlearn/__init__.py | 45 | from ._version import __version__
|
suhe/odoo | refs/heads/master | addons/website_form/controllers/__init__.py | 7372 | import main
|
SebasSBM/django | refs/heads/master | tests/gis_tests/geogapp/models.py | 336 | from django.utils.encoding import python_2_unicode_compatible
from ..models import models
@python_2_unicode_compatible
class NamedModel(models.Model):
name = models.CharField(max_length=30)
objects = models.GeoManager()
class Meta:
abstract = True
required_db_features = ['gis_enabled']
def __str__(self):
return self.name
class City(NamedModel):
point = models.PointField(geography=True)
class Meta:
app_label = 'geogapp'
required_db_features = ['gis_enabled']
class Zipcode(NamedModel):
code = models.CharField(max_length=10)
poly = models.PolygonField(geography=True)
class County(NamedModel):
state = models.CharField(max_length=20)
mpoly = models.MultiPolygonField(geography=True)
class Meta:
app_label = 'geogapp'
required_db_features = ['gis_enabled']
def __str__(self):
return ' County, '.join([self.name, self.state])
|
tisnik/fabric8-analytics-common | refs/heads/master | dashboard/src/api.py | 2 | """Module with class representing common API."""
import requests
class Api:
"""Class representing common API."""
_API_ENDPOINT = 'api/v1'
def __init__(self, url, token=None):
"""Set the API endpoint and store the authorization token if provided."""
self.url = Api.add_slash(url)
self.token = token
def is_api_running(self):
"""Check if the API is available for calls."""
try:
res = requests.get(self.url)
if res.status_code in {200, 401}:
return True
except requests.exceptions.ConnectionError:
pass
return False
@staticmethod
def add_slash(url):
"""Add a slash at end of URL, if the slash is not provided."""
if url and not url.endswith('/'):
url += '/'
return url
def get(self):
"""Use GET method to access API."""
return requests.get(self.url)
def print_error_response(self, response, message_key):
"""Print error message if anything goes wrong."""
print(" Server returned HTTP code {c}".format(c=response.status_code))
error_message = None
try:
            error_message = response.json().get(message_key, "Server did not send an error message")
except Exception:
pass # no error message
if error_message:
print(" Error message: {m}".format(m=error_message))
|
AmmsA/Githeat | refs/heads/master | test/test_interactive.py | 1 | """ Test suite for the interactive module.
The script can be executed on its own or incorporated into a larger test suite.
However the tests are run, be aware of which version of the module is actually
being tested. If the library is installed in site-packages, that version takes
precedence over the version in this project directory. Use a virtualenv test
environment or setuptools develop mode to test against the development version.
"""
import os
from subprocess import call
from sys import executable
import functools
import blessed
import pytest
from githeat import interactive
from xtermcolor import colorize
from argparse import ArgumentTypeError
TEST_TERMINAL = functools.partial(blessed.Terminal, kind='xterm-256color')
@pytest.fixture(params=" ")
def command(request):
""" Return the command to run.
"""
return request.param
@pytest.fixture
def patch_terminal(monkeypatch):
class test_terminal:
class mydatetime:
@classmethod
def Terminal(cls):
return TEST_TERMINAL
monkeypatch.setattr(blessed, 'Terminal', test_terminal)
@pytest.fixture()
def patch_terminal_size(monkeypatch):
term_width = '250'
term_height = '60'
monkeypatch.setitem(os.environ, 'COLUMNS', term_width)
monkeypatch.setitem(os.environ, 'LINES', term_height)
def test_main(command):
""" Test the main() function.
"""
# Call with the --help option as a basic sanity check.
with pytest.raises(SystemExit) as exinfo:
interactive.main(("{:s}".format(command), "--help"))
assert 0 == exinfo.value.code
return
def test_script(command):
""" Test command line execution.
"""
# Call with the --help option as a basic sanity check.
cmdl = "{:s} -m githeat.interactive --help".format(executable)
assert 0 == call(cmdl.split())
return
def test_print_left_header(patch_terminal_size):
term = TEST_TERMINAL()
with term.cbreak():
inp = term.inkey(timeout=0.0001)
screen = {}
interactive.print_header_left(term, "left header", screen)
assert len(screen) == 1
assert screen[0, 0] == "left header"
def test_print_center_header(patch_terminal_size):
term = TEST_TERMINAL()
with term.cbreak():
screen = {}
text = "center header"
interactive.print_header_center(term, text, screen)
assert len(screen) == 1
x = (term.width // 2) - len(text) // 2
assert screen[0, x] == text
def test_print_right_header(patch_terminal_size):
term = TEST_TERMINAL()
with term.cbreak():
screen = {}
text = "right header"
interactive.print_header_right(term, text, screen)
assert len(screen) == 1
x = term.width - len(text)
print(screen)
print(x)
assert screen[0, x] == text
def test_print_footer_left(patch_terminal_size):
term = TEST_TERMINAL()
with term.cbreak():
screen = {}
text = "footer left"
interactive.print_footer_left(term, text, screen)
assert len(screen) == 1
assert screen[term.height - 1, 0] == text
def test_top_authors_to_string():
authors = [("John", 4), ("Jason", 3), ("James", 2), ("Jordon", 2), ("J", 0)]
assert interactive.top_authors_to_string(authors) == "John, Jason, James, Jordon, J"
def test_top_authors_to_string_colorized():
ansii_colors = [1, 2, 3, 4, 5]
authors = [("John", 4), ("Jason", 3), ("James", 2), ("Jordon", 2), ("J", 0)]
colored = []
for tup in authors:
colored.append(colorize(tup[0], ansi=ansii_colors[tup[1]]))
colored = ", ".join(colored)
assert interactive.top_authors_to_string(authors, colors=ansii_colors) == colored
def test_print_graph_legend(patch_terminal_size):
term = TEST_TERMINAL()
with term.cbreak():
screen = {}
colors = [1, 2, 3, 4, 5]
block_width = " "
interactive.print_graph_legend(0, 0, block_width, 4, colors, screen, term)
result = {(0, 16): block_width,
(0, 12): block_width,
(0, 8): block_width,
(0, 4): block_width,
(0, 0): block_width}
assert screen == result
# usage: githeat.py [-h] [--width {thick,reg,thin}] [--days DAYS [DAYS ...]]
# [--color {grass,fire,sky}] [--stat-number STAT_NUMBER]
# [--stat] [--month-merge] [--hide-legend] [--author AUTHOR]
# [--grep GREP] [-c CONFIG] [-v]
# [--logging {CRITICAL,ERROR,WARNING,INFO,DEBUG,NOTSET}]
def test__cmdline():
argv = "--width reg --days sun Mon Fri Sat Friday --color sky --month-merge".split()
assert interactive._cmdline(argv).width == "reg"
assert interactive._cmdline(argv).days == ['Sunday', 'Friday', 'Saturday', 'Monday']
assert interactive._cmdline(argv).color == 'sky'
def test__cmdline_invalid_days():
argv = "--days blahday tuesday ".split()
with pytest.raises(ArgumentTypeError):
interactive._cmdline(argv)
def test_is_within_boundary_valid():
t = interactive.is_within_boundary(100, 0, 0, 100, interactive.Cursor(4, 5, None))
assert t is True
def test_is_within_boundary_invalid():
t = interactive.is_within_boundary(100, 0, 0, 100, interactive.Cursor(200, 200, None))
assert t is False
t = interactive.is_within_boundary(100, 10, 0, 100, interactive.Cursor(5, 55, None))
assert t is False
t = interactive.is_within_boundary(100, 0, 10, 100, interactive.Cursor(55, 5, None))
assert t is False
t = interactive.is_within_boundary(100, 0, 0, 100, interactive.Cursor(55, 200, None))
assert t is False
def test_resize_until_fit():
texts = ["hello", "there, ", "this is a loooooooooooooooooooooooooooooong text"]
new_texts = interactive.resize_until_fit(texts, 40)
assert new_texts == ['hello', 'there, ', 'this is a looooooooooooooooo']
texts = ["hello", "loooooooong looong", "loooooooooooooooooooooooooooooong text"]
new_texts = interactive.resize_until_fit(texts, 10)
assert new_texts == ['hello', 'loooo', '']
texts = ["hello", "there, ", "this text fits"]
new_texts = interactive.resize_until_fit(texts, 60)
assert new_texts == texts
# Make the script executable.
if __name__ == "__main__":
raise SystemExit(pytest.main(__file__))
|
TangXT/GreatCatMOOC | refs/heads/master | common/djangoapps/django_comment_common/models.py | 4 | import logging
from django.db import models
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
from student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
from xmodule.course_module import CourseDescriptor
FORUM_ROLE_ADMINISTRATOR = 'Administrator'
FORUM_ROLE_MODERATOR = 'Moderator'
FORUM_ROLE_COMMUNITY_TA = 'Community TA'
FORUM_ROLE_STUDENT = 'Student'
@receiver(post_save, sender=CourseEnrollment)
def assign_default_role(sender, instance, **kwargs):
# The code below would remove all forum Roles from a user when they unenroll
# from a course. Concerns were raised that it should apply only to students,
# or that even the history of student roles is important for research
# purposes. Since this was new functionality being added in this release,
# I'm just going to comment it out for now and let the forums team deal with
# implementing the right behavior.
#
# # We've unenrolled the student, so remove all roles for this course
# if not instance.is_active:
# course_roles = list(Role.objects.filter(course_id=instance.course_id))
# instance.user.roles.remove(*course_roles)
# return
# We've enrolled the student, so make sure they have the Student role
role = Role.objects.get_or_create(course_id=instance.course_id, name="Student")[0]
instance.user.roles.add(role)
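# Illustration (assumption): enrolling a user fires the post_save signal, so
# something like CourseEnrollment.enroll(user, course_id) leaves `user` with
# the course's "Student" role attached by the receiver above.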
class Role(models.Model):
name = models.CharField(max_length=30, null=False, blank=False)
users = models.ManyToManyField(User, related_name="roles")
course_id = models.CharField(max_length=255, blank=True, db_index=True)
class Meta:
# use existing table that was originally created from django_comment_client app
db_table = 'django_comment_client_role'
def __unicode__(self):
return self.name + " for " + (self.course_id if self.course_id else "all courses")
def inherit_permissions(self, role): # TODO the name of this method is a little bit confusing,
# since it's one-off and doesn't handle inheritance later
if role.course_id and role.course_id != self.course_id:
logging.warning("%s cannot inherit permissions from %s due to course_id inconsistency", \
self, role)
for per in role.permissions.all():
self.add_permission(per)
def add_permission(self, permission):
self.permissions.add(Permission.objects.get_or_create(name=permission)[0])
def has_permission(self, permission):
course_loc = CourseDescriptor.id_to_location(self.course_id)
course = modulestore().get_instance(self.course_id, course_loc)
if self.name == FORUM_ROLE_STUDENT and \
(permission.startswith('edit') or permission.startswith('update') or permission.startswith('create')) and \
(not course.forum_posts_allowed):
return False
return self.permissions.filter(name=permission).exists()
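# Usage sketch (permission name assumed): granting and checking a forum permission.
#
#   role = Role.objects.get_or_create(course_id=course_id, name=FORUM_ROLE_MODERATOR)[0]
#   role.add_permission('edit_content')
#   role.has_permission('edit_content')  # True, subject to the course check above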
class Permission(models.Model):
name = models.CharField(max_length=30, null=False, blank=False, primary_key=True)
roles = models.ManyToManyField(Role, related_name="permissions")
class Meta:
# use existing table that was originally created from django_comment_client app
db_table = 'django_comment_client_permission'
def __unicode__(self):
return self.name
|
RO-ny9/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/ctypes/test/test_cfuncs.py | 52 | # A lot of failures in these tests on Mac OS X.
# Byte order related?
import unittest
from ctypes import *
import _ctypes_test
class CFunctions(unittest.TestCase):
_dll = CDLL(_ctypes_test.__file__)
def S(self):
return c_longlong.in_dll(self._dll, "last_tf_arg_s").value
def U(self):
return c_ulonglong.in_dll(self._dll, "last_tf_arg_u").value
def test_byte(self):
self._dll.tf_b.restype = c_byte
self._dll.tf_b.argtypes = (c_byte,)
self.assertEqual(self._dll.tf_b(-126), -42)
self.assertEqual(self.S(), -126)
def test_byte_plus(self):
self._dll.tf_bb.restype = c_byte
self._dll.tf_bb.argtypes = (c_byte, c_byte)
self.assertEqual(self._dll.tf_bb(0, -126), -42)
self.assertEqual(self.S(), -126)
def test_ubyte(self):
self._dll.tf_B.restype = c_ubyte
self._dll.tf_B.argtypes = (c_ubyte,)
self.assertEqual(self._dll.tf_B(255), 85)
self.assertEqual(self.U(), 255)
def test_ubyte_plus(self):
self._dll.tf_bB.restype = c_ubyte
self._dll.tf_bB.argtypes = (c_byte, c_ubyte)
self.assertEqual(self._dll.tf_bB(0, 255), 85)
self.assertEqual(self.U(), 255)
def test_short(self):
self._dll.tf_h.restype = c_short
self._dll.tf_h.argtypes = (c_short,)
self.assertEqual(self._dll.tf_h(-32766), -10922)
self.assertEqual(self.S(), -32766)
def test_short_plus(self):
self._dll.tf_bh.restype = c_short
self._dll.tf_bh.argtypes = (c_byte, c_short)
self.assertEqual(self._dll.tf_bh(0, -32766), -10922)
self.assertEqual(self.S(), -32766)
def test_ushort(self):
self._dll.tf_H.restype = c_ushort
self._dll.tf_H.argtypes = (c_ushort,)
self.assertEqual(self._dll.tf_H(65535), 21845)
self.assertEqual(self.U(), 65535)
def test_ushort_plus(self):
self._dll.tf_bH.restype = c_ushort
self._dll.tf_bH.argtypes = (c_byte, c_ushort)
self.assertEqual(self._dll.tf_bH(0, 65535), 21845)
self.assertEqual(self.U(), 65535)
def test_int(self):
self._dll.tf_i.restype = c_int
self._dll.tf_i.argtypes = (c_int,)
self.assertEqual(self._dll.tf_i(-2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_int_plus(self):
self._dll.tf_bi.restype = c_int
self._dll.tf_bi.argtypes = (c_byte, c_int)
self.assertEqual(self._dll.tf_bi(0, -2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_uint(self):
self._dll.tf_I.restype = c_uint
self._dll.tf_I.argtypes = (c_uint,)
self.assertEqual(self._dll.tf_I(4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_uint_plus(self):
self._dll.tf_bI.restype = c_uint
self._dll.tf_bI.argtypes = (c_byte, c_uint)
self.assertEqual(self._dll.tf_bI(0, 4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_long(self):
self._dll.tf_l.restype = c_long
self._dll.tf_l.argtypes = (c_long,)
self.assertEqual(self._dll.tf_l(-2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_long_plus(self):
self._dll.tf_bl.restype = c_long
self._dll.tf_bl.argtypes = (c_byte, c_long)
self.assertEqual(self._dll.tf_bl(0, -2147483646), -715827882)
self.assertEqual(self.S(), -2147483646)
def test_ulong(self):
self._dll.tf_L.restype = c_ulong
self._dll.tf_L.argtypes = (c_ulong,)
self.assertEqual(self._dll.tf_L(4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_ulong_plus(self):
self._dll.tf_bL.restype = c_ulong
self._dll.tf_bL.argtypes = (c_char, c_ulong)
self.assertEqual(self._dll.tf_bL(b' ', 4294967295), 1431655765)
self.assertEqual(self.U(), 4294967295)
def test_longlong(self):
self._dll.tf_q.restype = c_longlong
self._dll.tf_q.argtypes = (c_longlong, )
self.assertEqual(self._dll.tf_q(-9223372036854775806), -3074457345618258602)
self.assertEqual(self.S(), -9223372036854775806)
def test_longlong_plus(self):
self._dll.tf_bq.restype = c_longlong
self._dll.tf_bq.argtypes = (c_byte, c_longlong)
self.assertEqual(self._dll.tf_bq(0, -9223372036854775806), -3074457345618258602)
self.assertEqual(self.S(), -9223372036854775806)
def test_ulonglong(self):
self._dll.tf_Q.restype = c_ulonglong
self._dll.tf_Q.argtypes = (c_ulonglong, )
self.assertEqual(self._dll.tf_Q(18446744073709551615), 6148914691236517205)
self.assertEqual(self.U(), 18446744073709551615)
def test_ulonglong_plus(self):
self._dll.tf_bQ.restype = c_ulonglong
self._dll.tf_bQ.argtypes = (c_byte, c_ulonglong)
self.assertEqual(self._dll.tf_bQ(0, 18446744073709551615), 6148914691236517205)
self.assertEqual(self.U(), 18446744073709551615)
def test_float(self):
self._dll.tf_f.restype = c_float
self._dll.tf_f.argtypes = (c_float,)
self.assertEqual(self._dll.tf_f(-42.), -14.)
self.assertEqual(self.S(), -42)
def test_float_plus(self):
self._dll.tf_bf.restype = c_float
self._dll.tf_bf.argtypes = (c_byte, c_float)
self.assertEqual(self._dll.tf_bf(0, -42.), -14.)
self.assertEqual(self.S(), -42)
def test_double(self):
self._dll.tf_d.restype = c_double
self._dll.tf_d.argtypes = (c_double,)
self.assertEqual(self._dll.tf_d(42.), 14.)
self.assertEqual(self.S(), 42)
def test_double_plus(self):
self._dll.tf_bd.restype = c_double
self._dll.tf_bd.argtypes = (c_byte, c_double)
self.assertEqual(self._dll.tf_bd(0, 42.), 14.)
self.assertEqual(self.S(), 42)
def test_longdouble(self):
self._dll.tf_D.restype = c_longdouble
self._dll.tf_D.argtypes = (c_longdouble,)
self.assertEqual(self._dll.tf_D(42.), 14.)
self.assertEqual(self.S(), 42)
def test_longdouble_plus(self):
self._dll.tf_bD.restype = c_longdouble
self._dll.tf_bD.argtypes = (c_byte, c_longdouble)
self.assertEqual(self._dll.tf_bD(0, 42.), 14.)
self.assertEqual(self.S(), 42)
def test_callwithresult(self):
def process_result(result):
return result * 2
self._dll.tf_i.restype = process_result
self._dll.tf_i.argtypes = (c_int,)
self.assertEqual(self._dll.tf_i(42), 28)
self.assertEqual(self.S(), 42)
self.assertEqual(self._dll.tf_i(-42), -28)
self.assertEqual(self.S(), -42)
def test_void(self):
self._dll.tv_i.restype = None
self._dll.tv_i.argtypes = (c_int,)
self.assertEqual(self._dll.tv_i(42), None)
self.assertEqual(self.S(), 42)
self.assertEqual(self._dll.tv_i(-42), None)
self.assertEqual(self.S(), -42)
# The following repeats the above tests with stdcall functions (where
# they are available)
try:
WinDLL
except NameError:
pass
else:
class stdcall_dll(WinDLL):
def __getattr__(self, name):
if name[:2] == '__' and name[-2:] == '__':
raise AttributeError(name)
func = self._FuncPtr(("s_" + name, self))
setattr(self, name, func)
return func
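    # Illustration: with this __getattr__, accessing `dll.tf_b` resolves the
    # exported stdcall symbol "s_tf_b" on first use and caches it on the
    # instance, so the CFunctions tests above run unchanged against the
    # s_-prefixed variants.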
class stdcallCFunctions(CFunctions):
_dll = stdcall_dll(_ctypes_test.__file__)
pass
if __name__ == '__main__':
unittest.main()
|
wetneb/dissemin | refs/heads/master | deposit/tests/test_protocol.py | 2 | # -*- encoding: utf-8 -*-
# Dissemin: open access policy enforcement tool
# Copyright (C) 2014 Antonin Delpeuch
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
from datetime import date
from io import BytesIO
import unittest
import django.test
import pytest
import os
from deposit.models import DDC
from deposit.models import License
from deposit.models import LicenseChooser
from deposit.protocol import DepositError
from deposit.protocol import DepositResult
from deposit.protocol import RepositoryProtocol
from deposit.registry import protocol_registry
from django.contrib.auth.models import User
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.forms import Form
from django.test.utils import override_settings
from django.urls import reverse
from papers.models import OaiSource
from papers.models import OaiRecord
from papers.models import Paper
class MetaTestProtocol():
"""
This class contains some tests that every implemented protocol shall pass. The tests are not executed as members of this class, but of any subclass.
If you change one of the tested functions in your subclassed protocol, please override the test in the corresponding test class.
"""
@pytest.mark.parametrize('embargo', [None, date.today()])
def test_add_embargo_date_to_deposit_result(self, embargo):
"""
If an embargo is set, add to deposit record, otherwise not
"""
# We just set cleaned data directly
f = Form()
f.cleaned_data = dict()
if embargo is not None:
f.cleaned_data['embargo'] = embargo
dr = DepositResult(status='pending')
dr = self.protocol._add_embargo_date_to_deposit_result(dr, f)
assert dr.embargo_date == embargo
def test_add_license_to_deposit_result(self, license_chooser):
"""
If a license is selected, add to deposit record, otherwise not
"""
# We just set the cleaned data directly
f = Form()
f.cleaned_data = dict()
if license_chooser:
f.cleaned_data['license'] = license_chooser
dr = DepositResult(status='pending')
dr = self.protocol._add_license_to_deposit_result(dr, f)
if license_chooser:
assert dr.license == license_chooser.license
else:
            assert dr.license is None
def test_deposit_page_status(self, authenticated_client, rendering_get_page, book_god_of_the_labyrinth):
"""
Test the deposit page for HTTP Response 200
"""
r = rendering_get_page(authenticated_client, 'upload-paper', kwargs={'pk': book_god_of_the_labyrinth.pk})
assert r.status_code == 200
def test_get_form(self, book_god_of_the_labyrinth, abstract_required, ddc, embargo, license_chooser):
self.protocol.paper = book_god_of_the_labyrinth
form = self.protocol.get_form()
assert form.fields['abstract'].required == abstract_required
if ddc:
assert 'ddc' in form.fields
else:
assert 'ddc' not in form.fields
if embargo == 'required':
assert form.fields['embargo'].required == True
elif embargo == 'optional':
assert form.fields['embargo'].required == False
else:
assert 'embargo' not in form.fields
if license_chooser:
assert 'license' in form.fields
else:
assert 'license' not in form.fields
assert 'paper_id' in form.fields
def test_get_bound_form(self, book_god_of_the_labyrinth, abstract_required, ddc, embargo, license_chooser):
self.protocol.paper = book_god_of_the_labyrinth
data = {
'paper_pk' : book_god_of_the_labyrinth.pk
}
if abstract_required:
data['abstract'] = 'Simple abstract'
if ddc:
data['ddc'] = ddc
if license_chooser:
data['license'] = license_chooser.pk
if embargo == 'required':
data['embargo'] = '2019-10-10'
        form = self.protocol.get_bound_form(data)
if not form.is_valid():
print(form.errors)
raise AssertionError("Form not valid")
def test_get_form_return_type(self, book_god_of_the_labyrinth, user_isaac_newton):
"""
Return type of get_form shall by a form
"""
self.protocol.init_deposit(book_god_of_the_labyrinth ,user_isaac_newton)
form = self.protocol.get_form()
assert isinstance(form, Form)
def test_init_deposit(self, user_isaac_newton, book_god_of_the_labyrinth):
"""
init_deposit shall return a bool
"""
result = self.protocol.init_deposit(book_god_of_the_labyrinth, user_isaac_newton)
assert self.protocol.paper == book_god_of_the_labyrinth
assert self.protocol.user == user_isaac_newton
assert self.protocol._logs == ''
assert result == True
def test_get_ddcs(self, db):
"""
Function should return a queryset of length > 1 of DDCs if DDCs are choosen
"""
for ddc in DDC.objects.all():
self.protocol.repository.ddc.add(ddc)
assert len(self.protocol._get_ddcs()) == DDC.objects.all().count()
def test_get_ddcs_none(self):
"""
Function should return ``None`` if noe DDS selected for repository
"""
        assert self.protocol._get_ddcs() is None
def test_get_licenses(self, db):
"""
Function should return a queryset of of length > 1 of LicenseChoosers if LicenseChoosers are choosen
"""
for uri in ['https://creativecommons.org/publicdomain/zero/1.0/', 'https://creativecommons.org/licenses/by/4.0/', 'http://creativecommons.org/licenses/by-nd/4.0/']:
license = License.objects.get(uri=uri)
LicenseChooser.objects.create(
license=license,
repository=self.protocol.repository,
)
assert len(self.protocol._get_licenses()) == 3
def test_get_licenses_none(self):
"""
Function should return none if no LicenseChooser selected for repository
"""
        assert self.protocol._get_licenses() is None
def test_get_preferences(self, user_isaac_newton):
"""
If a protocol has preferences, return object, else ``None``
"""
if self.protocol.preferences_model is None:
            assert self.protocol.get_preferences(user_isaac_newton) is None
else:
assert isinstance(self.protocol.get_preferences(user_isaac_newton), self.protocol.preferences_model)
def test_log(self):
"""
Simply append a line to self._logs
"""
msg = 'Spanish Inquisition'
self.protocol.log(msg)
assert self.protocol._logs == msg + "\n"
def test_log_request(self, request_fake_response):
"""
Tests the log request.
"""
        assert self.protocol.log_request(request_fake_response, 200, 'Does not serve') is None
def test_log_request_error(self, request_fake_response):
"""
Tests the log request
"""
with pytest.raises(DepositError):
self.protocol.log_request(request_fake_response, 201, 'Does not serve')
def test_protocol_identifier(self):
"""
Identifier should exist
"""
assert len(self.protocol.protocol_identifier()) > 1
@pytest.mark.parametrize('on_todolist', [True, False])
@pytest.mark.parametrize('splash_url, expected_splash_url', [(None, type(None)), ('https://repository.dissem.in/1/spam.pdf', OaiRecord)])
def test_submit_deposit_wrapper(self, splash_url, expected_splash_url, on_todolist, book_god_of_the_labyrinth, depositing_user, monkeypatch):
"""
We monkeypatch the submit_deposit to return a DepositResult.
"""
self.protocol.paper = book_god_of_the_labyrinth
self.protocol.user = depositing_user
if on_todolist:
book_god_of_the_labyrinth.todolist.add(self.protocol.user)
dr = DepositResult(splash_url=splash_url)
monkeypatch.setattr(self.protocol, 'submit_deposit', lambda *args, **kwargs: dr)
deposit_result = self.protocol.submit_deposit_wrapper()
assert isinstance(deposit_result, DepositResult)
assert isinstance(deposit_result.oairecord, expected_splash_url)
assert book_god_of_the_labyrinth.todolist.filter(pk=self.protocol.user.pk).exists() == False
@pytest.mark.parametrize('on_todolist', [True, False])
@pytest.mark.parametrize('exc', [DepositError, Exception])
def test_submit_deposit_wrapper_exception(self, book_god_of_the_labyrinth, depositing_user, on_todolist, exc, monkeypatch):
"""
Something went wrong when depositing. Exceptions must be fetched and Deposit status status must be "failed". To do that we simply monkeypatch submit_deposit
"""
self.protocol.paper = book_god_of_the_labyrinth
self.protocol.user = depositing_user
if on_todolist:
book_god_of_the_labyrinth.todolist.add(self.protocol.user)
def submit_deposit(self, *args, **kwargs):
raise exc
monkeypatch.setattr(self.protocol, 'submit_deposit', submit_deposit)
deposit_result = self.protocol.submit_deposit_wrapper()
assert deposit_result.status == 'failed'
assert book_god_of_the_labyrinth.todolist.filter(pk=self.protocol.user.pk).exists() == on_todolist
def test_protocol_registered(self):
"""
This test makes sure that each tested protocol is registered. You can temporarly override this function in your corresponding protocol test as long as you do not have it registered.
"""
p = protocol_registry.get(self.protocol.__class__.__name__)
assert issubclass(p, RepositoryProtocol) == True
# 1x1 px image used as default logo for the repository
simple_png_image = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00\x90wS\xde\x00\x00\x00\tpHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdf\n\x12\x0c+\x19\x84\x1d/"\x00\x00\x00\x19tEXtComment\x00Created with GIMPW\x81\x0e\x17\x00\x00\x00\x0cIDAT\x08\xd7c\xa8\xa9\xa9\x01\x00\x02\xec\x01u\x90\x90\x1eL\x00\x00\x00\x00IEND\xaeB`\x82'
@pytest.mark.usefixtures("load_test_data", "rebuild_index")
class ProtocolTest():
"""
Set of generic tests that any protocol should pass.
    Note: This class is going to be deprecated and is no longer maintained.
"""
def __init__(self, *args, **kwargs):
super(ProtocolTest, self).__init__(*args, **kwargs)
@override_settings(MEDIA_ROOT='mediatest/')
def setUp(self):
if type(self) is ProtocolTest:
raise unittest.SkipTest("Base test")
self.p1 = Paper.get_or_create(
"This is a test paper",
[self.r1.name, self.r2.name, self.r4.name],
date(year=2014, month=2, day=15))
self.username = 'mydepositinguser'
self.password = 'supersecret'
self.user = User.objects.create_user(username=self.username, email="[email protected]", password=self.password)
self.testdir = os.path.dirname(os.path.abspath(__file__))
self.pdfpath = os.path.join(self.testdir, 'data/blank.pdf')
def setUpForProtocol(self, protocol_class, repository):
self.oaisource, _ = OaiSource.objects.get_or_create(
identifier='deposit_oaisource',
name='Repository OAI source',
default_pubtype='preprint')
logo = InMemoryUploadedFile(
BytesIO(simple_png_image),
None, 'logo.png',
'image/png', len(simple_png_image), None, None)
self.repo = repository
self.repo.oaisource = self.oaisource
self.repo.logo = logo
if not self.repo.description:
self.repo.description = 'brsuatiercs'
if not self.repo.name:
self.repo.name = 'Test Repository'
self.repo.protocol = protocol_class.__name__
self.repo.save()
protocol_registry.register(protocol_class)
self.proto = protocol_class(self.repo)
self.form = None
def test_protocol_identifier(self):
self.assertTrue(len(self.proto.protocol_identifier()) > 1)
def test_init_deposit(self):
retval = self.proto.init_deposit(self.p1, self.user)
self.assertIs(type(retval), bool)
def test_get_form_return_type(self):
self.proto.init_deposit(self.p1, self.user)
retval = self.proto.get_form()
self.assertIsInstance(retval, Form)
def test_deposit_page(self):
self.assertEqual(self.user.username, self.username)
client = django.test.Client(HTTP_HOST='localhost')
self.assertTrue(client.login(username=self.username, password=self.password))
r = client.get(reverse('upload-paper', kwargs={'pk': self.p1.pk}))
self.assertEqual(r.status_code, 200)
def dry_deposit(self, paper, **form_fields):
"""
This is not a test by itself - it's a method
subclasses can call to create a fake deposit
(if the protocol supports it!)
"""
return self.deposit(paper, dry_run=True, **form_fields)
def deposit(self, paper, dry_run=False, **form_fields):
enabled = self.proto.init_deposit(paper, self.user)
self.assertTrue(enabled)
licenses = LicenseChooser.objects.by_repository(repository=self.repo)
args = self.proto.get_form_initial_data(licenses=licenses)
args.update(form_fields)
# The forms needs the pk of LicenceChooser object
args['license'] = self.lc.pk
form = self.proto.get_bound_form(args)
if not form.is_valid():
print(form.errors)
self.assertTrue(form.is_valid())
pdf = self.pdfpath
deposit_result = self.proto.submit_deposit_wrapper(pdf,
form, dry_run=dry_run)
self.assertIsInstance(deposit_result, DepositResult)
self.assertIsInstance(deposit_result.additional_info, list)
for i in deposit_result.additional_info:
self.assertNotEqual(i.get('label'), None)
self.assertNotEqual(i.get('value'), None)
return deposit_result
def assertEqualOrLog(self, a, b):
"""
Same as assertEqual but prints logs before failing
if the two quantities are not equal.
"""
if a != b:
print(self.proto._logs)
self.assertEqual(a, b)
class ProtocolRegistryTest(django.test.TestCase):
def test_get(self):
c = protocol_registry.get('ZenodoProtocol')
self.assertTrue(issubclass(c, RepositoryProtocol))
|
edxnercel/edx-platform | refs/heads/master | openedx/core/djangoapps/course_groups/tests/helpers.py | 77 | """
Helper methods for testing cohorts.
"""
import factory
from factory import post_generation, Sequence
from factory.django import DjangoModelFactory
import json
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from ..cohorts import set_course_cohort_settings
from ..models import CourseUserGroup, CourseCohort, CourseCohortsSettings
class CohortFactory(DjangoModelFactory):
"""
Factory for constructing mock cohorts.
"""
FACTORY_FOR = CourseUserGroup
name = Sequence("cohort{}".format)
course_id = SlashSeparatedCourseKey("dummy", "dummy", "dummy")
group_type = CourseUserGroup.COHORT
@post_generation
def users(self, create, extracted, **kwargs): # pylint: disable=unused-argument
"""
Returns the users associated with the cohort.
"""
if extracted:
self.users.add(*extracted)
class CourseCohortFactory(DjangoModelFactory):
"""
Factory for constructing mock course cohort.
"""
FACTORY_FOR = CourseCohort
course_user_group = factory.SubFactory(CohortFactory)
assignment_type = 'manual'
class CourseCohortSettingsFactory(DjangoModelFactory):
"""
Factory for constructing mock course cohort settings.
"""
FACTORY_FOR = CourseCohortsSettings
is_cohorted = False
course_id = SlashSeparatedCourseKey("dummy", "dummy", "dummy")
cohorted_discussions = json.dumps([])
# pylint: disable=invalid-name
always_cohort_inline_discussions = True
def topic_name_to_id(course, name):
"""
Given a discussion topic name, return an id for that name (includes
course and url_name).
"""
return "{course}_{run}_{name}".format(
course=course.location.course,
run=course.url_name,
name=name
)
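# Example (values assumed): for a course whose location.course is "Demo" and
# whose url_name is "2014", topic_name_to_id(course, "General") returns
# "Demo_2014_General".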
def config_course_cohorts_legacy(
course,
discussions,
cohorted,
cohorted_discussions=None,
auto_cohort_groups=None,
always_cohort_inline_discussions=None # pylint: disable=invalid-name
):
"""
Given a course with no discussion set up, add the discussions and set
the cohort config on the course descriptor.
Since cohort settings are now stored in models.CourseCohortSettings,
this is only used for testing data migration from the CourseDescriptor
to the table.
Arguments:
course: CourseDescriptor
        discussions: list of topic name strings. Picks ids and sort_keys
automatically.
cohorted: bool.
cohorted_discussions: optional list of topic names. If specified,
converts them to use the same ids as topic names.
auto_cohort_groups: optional list of strings
(names of groups to put students into).
Returns:
Nothing -- modifies course in place.
"""
def to_id(name):
return topic_name_to_id(course, name)
topics = dict((name, {"sort_key": "A",
"id": to_id(name)})
for name in discussions)
course.discussion_topics = topics
config = {"cohorted": cohorted}
if cohorted_discussions is not None:
config["cohorted_discussions"] = [to_id(name)
for name in cohorted_discussions]
if auto_cohort_groups is not None:
config["auto_cohort_groups"] = auto_cohort_groups
if always_cohort_inline_discussions is not None:
config["always_cohort_inline_discussions"] = always_cohort_inline_discussions
course.cohort_config = config
try:
# Not implemented for XMLModulestore, which is used by test_cohorts.
modulestore().update_item(course, ModuleStoreEnum.UserID.test)
except NotImplementedError:
pass
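# Usage sketch (topic names assumed):
#   config_course_cohorts_legacy(course, ["General", "Feedback"], cohorted=True,
#                                cohorted_discussions=["Feedback"])
# writes the legacy cohort_config dict directly onto the course descriptor.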
# pylint: disable=dangerous-default-value
def config_course_cohorts(
course,
is_cohorted,
auto_cohorts=[],
manual_cohorts=[],
discussion_topics=[],
cohorted_discussions=[],
always_cohort_inline_discussions=True # pylint: disable=invalid-name
):
"""
Set discussions and configure cohorts for a course.
Arguments:
course: CourseDescriptor
is_cohorted (bool): Is the course cohorted?
auto_cohorts (list): Names of auto cohorts to create.
manual_cohorts (list): Names of manual cohorts to create.
discussion_topics (list): Discussion topic names. Picks ids and
sort_keys automatically.
cohorted_discussions: Discussion topics to cohort. Converts the
list to use the same ids as discussion topic names.
always_cohort_inline_discussions (bool): Whether inline discussions
should be cohorted by default.
Returns:
Nothing -- modifies course in place.
"""
def to_id(name):
"""Convert name to id."""
return topic_name_to_id(course, name)
set_course_cohort_settings(
course.id,
is_cohorted=is_cohorted,
cohorted_discussions=[to_id(name) for name in cohorted_discussions],
always_cohort_inline_discussions=always_cohort_inline_discussions
)
for cohort_name in auto_cohorts:
cohort = CohortFactory(course_id=course.id, name=cohort_name)
CourseCohortFactory(course_user_group=cohort, assignment_type=CourseCohort.RANDOM)
for cohort_name in manual_cohorts:
cohort = CohortFactory(course_id=course.id, name=cohort_name)
CourseCohortFactory(course_user_group=cohort, assignment_type=CourseCohort.MANUAL)
course.discussion_topics = dict((name, {"sort_key": "A", "id": to_id(name)})
for name in discussion_topics)
try:
# Not implemented for XMLModulestore, which is used by test_cohorts.
modulestore().update_item(course, ModuleStoreEnum.UserID.test)
except NotImplementedError:
pass
|
techtonik/wesnoth | refs/heads/master | scons/gettext.py | 24 | # vi: syntax=python:et:ts=4
from os.path import join
import os
from SCons.Builder import Builder
from SCons.Util import WhereIs
from config_check_utils import find_include
def exists():
return True
def generate(env):
env.AppendENVPath("PATH", join(env["gettextdir"], "bin"))
env["MSGFMT"] = WhereIs("msgfmt")
msgfmt = Builder(
action = "$MSGFMT --check-format --check-domain --statistics -o $TARGET $SOURCE",
src_suffix = ".po",
suffix = ".mo",
single_source = True
)
env["BUILDERS"]["Msgfmt"] = msgfmt
env["MSGMERGE"] = WhereIs("msgmerge")
msgmerge = Builder(
action = "$MSGMERGE $TARGET $SOURCE -o $TARGET",
src_suffix = ".pot",
suffix = ".po",
single_source = True
)
env["BUILDERS"]["MsgMerge"] = msgmerge
env["MSGINIT"] = WhereIs("msginit")
msginit = Builder(
action = "$MSGINIT -i $SOURCE -o $TARGET --no-translator",
src_suffix = ".pot",
suffix = ".po",
single_source = True
)
env["BUILDERS"]["MsgInit"] = msginit
env["ENV"]["LANG"] = os.environ.get("LANG")
def MsgInitMerge(env, target, source):
if os.path.exists(target + ".po"):
return env.MsgMerge(target, source)
else:
return env.MsgInit(target, source)
env.AddMethod(MsgInitMerge)
env["PO4A_GETTEXTIZE"] = WhereIs("po4a-gettextize")
po4a_gettextize = Builder(
action = "$PO4A_GETTEXTIZE -f $PO4A_FORMAT ${''.join([' -m ' + str(source) for source in SOURCES])} -p $TARGET",
)
env["BUILDERS"]["Po4aGettextize"] = po4a_gettextize
env["PO4A_TRANSLATE"] = WhereIs("po4a-translate")
po4a_translate = Builder(
action = "$PO4A_TRANSLATE -f $PO4A_FORMAT -L $PO4A_CHARSET -m ${SOURCES[0]} -p ${SOURCES[1]} -l $TARGET"
)
env["BUILDERS"]["Po4aTranslate"] = po4a_translate
def CheckGettextLibintl(context):
env = context.env
backup = env.Clone().Dictionary()
context.Message("Checking for Gettext's libintl... ")
test_program = """
#include <libintl.h>
int main()
{
textdomain("test");
char* text = gettext("foo");
}
\n"""
if not env.get("gettextdir") and context.TryLink(test_program, ".c"):
context.Result("libc built-in")
return True
prefixes = [env["prefix"]]
if env.get("gettextdir"):
prefixes = [env["gettextdir"]] + prefixes
includes = find_include(prefixes, "libintl.h", default_prefixes=not env["host"])
if includes:
env.AppendUnique(
CPPPATH = [join(includes[0][0], "include")],
LIBPATH = [join(includes[0][0], "lib")]
)
env.AppendUnique(LIBS = ["intl"])
if context.TryLink("/* external libintl*/\n" + test_program, ".c"):
context.Result("external")
return True
context.Result("no")
env.Replace(**backup)
return False
config_checks = { "CheckGettextLibintl" : CheckGettextLibintl }
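# Illustrative SConscript usage sketch (paths and option values are
# assumptions, not part of this tool):
#
#     env = Environment(tools=["default", "gettext"], toolpath=["scons"])
#     env.Msgfmt("po/de/wesnoth.po")              # builds po/de/wesnoth.mo
#     env.MsgInitMerge("po/de/wesnoth", "po/wesnoth.pot")
#     conf = Configure(env, custom_tests=config_checks)
#     have_libintl = conf.CheckGettextLibintl()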
|
pradyunsg/pip | refs/heads/update-to-libera | src/pip/_internal/operations/build/__init__.py | 12133432 | |
RandyLowery/erpnext | refs/heads/develop | erpnext/schools/doctype/student_attendance/__init__.py | 12133432 | |
purpleidea/macaronic-net | refs/heads/master | django/contrib/admindocs/__init__.py | 12133432 | |
popazerty/dvbapp2-gui | refs/heads/master | lib/python/Plugins/Extensions/Modem/__init__.py | 12133432 | |
christianurich/DynaMind-ToolBox | refs/heads/master | DynaMind-GDALModules/scripts/GDALModules/dm_import_landcover_geoscape.py | 1 | from pydynamind import *
import gdal, osr
from gdalconst import *
import struct
import numpy as np
import compiler
import paramiko
import os
import uuid
import gc
class DM_ImportLandCoverGeoscape(Module):
display_name = "Import Landcover Geoscape"
group_name = "Data Import and Export"
def getHelpUrl(self):
return "/DynaMind-GDALModules/dm_import_landuse.html"
def __init__(self):
Module.__init__(self)
self.setIsGDALModule(True)
self.createParameter("view_name", STRING)
self.view_name = "node"
self.createParameter("view_name_grid", STRING)
self.view_name_grid = "city"
self.createParameter("raster_file", FILENAME)
self.raster_file = ""
self.createParameter("username", STRING)
self.username = ""
self.createParameter("password", STRING)
self.password = ""
self.createParameter("port", INT)
self.port = 22
self.createParameter("host", STRING)
self.host = ""
self.createParameter("big_raster_file", BOOL)
self.big_raster_file = False
self.createParameter("transform", BOOL)
self.transform = True
self.transport = None
self.sftp = None
self.downloaded_file = ""
self.real_file_name = ""
self.index_max = 0
self.geoscape_landclass = None
def generate_downloaded_file_name(self):
return self.raster_file + self.username + self.password + self.host
def connect(self):
established = False
try:
log(str(self.host) + " " + str(self.port) + " " + str(self.username) + " " + str(self.password), Standard)
self.transport = paramiko.Transport((self.host, self.port))
self.transport.connect(username=self.username, password=self.password)
established = True
except:
return False
log("connected", Standard)
self.sftp = paramiko.SFTPClient.from_transport(self.transport)
return True
def close(self):
log("close connection", Standard)
self.sftp.close()
self.transport.close()
self.sftp = None
self.transport = None
def get_file(self, file_name):
if self.real_file_name:
os.remove(self.real_file_name)
self.real_file_name = "/tmp/" + str(uuid.uuid4())
try:
self.sftp.get(file_name, self.real_file_name)
except Exception as e:
print(e)
self.real_file_name = ""
return False
self.downloaded_file = self.generate_downloaded_file_name()
return True
def init(self):
self.node_view = ViewContainer(self.view_name, FACE, READ)
self.city = ViewContainer(self.view_name_grid, FACE, READ)
if self.transform:
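            # Map Geoscape raster class ids to the internal land-use class ids
            # used below; -1 flags raster classes that are skipped when
            # aggregating fractions.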
self.geoscape_landclass = {
2: 5, # grass
3: 13,
4: 7,
5: 1,
6: 7,
7: 15,
8: 2,
9: 12,
10: -1,
11: -1,
12: 2
}
self.index_max = 12
else:
self.geoscape_landclass = {
1: 1, # trees
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
11: 11,
12: 12,
13: 13,
14: 14,
15: 15
}
self.index_max = 15
self.landuse_classes = {
"tree_cover_fraction": 1,
"water_fraction": 2,
"pond_and_basin_fraction": 3,
"wetland_fraction": 4,
"grass_fraction": 5,
"swale_fraction": 6,
"irrigated_grass_fraction": 7,
"bio_retention_fraction": 8,
"infiltration_fraction": 9,
"green_roof_fraction": 10,
"green_wall_fraction": 11,
"roof_fraction": 12,
"road_fraction": 13,
"porous_fraction": 14,
"concrete_fraction": 15
}
for key in self.landuse_classes:
self.node_view.addAttribute(key, Attribute.DOUBLE, WRITE)
# self.node_view.addAttribute(key, Attribute.INT, WRITE)
self.node_view.addAttribute("geoscape_count", Attribute.INT, WRITE)
self.node_view.addAttribute("geoscape_missed", Attribute.INT, WRITE)
        self.registerViewContainers([self.node_view, self.city])
def run(self):
if self.host:
if not self.connect():
log("Connection to host failed", Error)
return
if not self.get_file(self.raster_file):
log("Failed to download file", Error)
return
else:
self.real_file_name = self.raster_file
dataset = gdal.Open(self.real_file_name, GA_ReadOnly)
if not dataset:
log("Failed to open file", Error)
self.setStatus(MOD_EXECUTION_ERROR)
return
band = dataset.GetRasterBand(1)
gt = dataset.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt(dataset.GetProjection())
srsLatLong = osr.SpatialReference()
srsLatLong.ImportFromEPSG(self.getSimulationConfig().getCoorindateSystem())
ct = osr.CoordinateTransformation(srsLatLong, srs)
areas = []
for c_idx, c in enumerate(self.city):
log(str(c.GetFID()), Standard )
geom = c.GetGeometryRef()
env = geom.GetEnvelope()
p1 = ct.TransformPoint(env[0], env[2])
p2 = ct.TransformPoint(env[1], env[3])
minx = int((p1[0] - gt[0]) / gt[1])
miny = int((p1[1] - gt[3]) / gt[5])
maxx = int((p2[0] - gt[0]) / gt[1])
maxy = int((p2[1] - gt[3]) / gt[5])
if miny > maxy:
min_y_tmp = miny
miny = maxy
maxy = min_y_tmp
minx -= 100
miny -= 100
maxx += 100
maxy += 100
areas.append([c.GetFID(), minx, miny, maxx - minx, maxy - miny])
self.city.finalise()
for a in areas:
log(str(a), Standard)
if self.big_raster_file:
self.node_view.set_attribute_filter(self.view_name_grid + "_id = " + str(a[0]))
else:
self.node_view.reset_reading()
# a[2] = 0
log(str(a), Standard)
log(str(band.XSize), Standard)
log(str(band.YSize), Standard)
# Check if we are within bounds
if a[2] < 0:
a[2] = 0
if a[1] < 0:
a[1] = 0
if a[1] + a[3] > band.XSize:
a[3] = band.XSize - a[1]
if a[2] + a[4] > band.YSize:
a[4] = band.YSize - a[2]
log(str(a), Standard)
values = band.ReadAsArray(a[1], a[2], a[3], a[4])
if values is None:
log("Something went terribly wrong and I didn't get any data", Error)
return
gminy = a[2]
gminx = a[1]
for node_idx, node in enumerate(self.node_view):
geom = node.GetGeometryRef()
env = geom.GetEnvelope()
p1 = ct.TransformPoint(env[0], env[2])
p2 = ct.TransformPoint(env[1], env[3])
minx = int((p1[0] - gt[0]) / gt[1])
miny = int((p1[1] - gt[3]) / gt[5])
maxx = int((p2[0] - gt[0]) / gt[1])
maxy = int((p2[1] - gt[3]) / gt[5])
if miny > maxy:
min_y_tmp = miny
miny = maxy
maxy = min_y_tmp
val_array = np.zeros(16)
missed = 0
count = 0
for x in range(minx, maxx + 1):
for y in range(miny, maxy + 1):
if x < 0 or y < 0 or x > band.XSize - 1 or y > band.YSize - 1:
continue
try:
idx = int(values[int(y) - gminy][int(x) - gminx])
except IndexError:
                            missed += 1
continue
if idx < 1 or idx > self.index_max:
continue
val = self.geoscape_landclass[idx]
if val < 1:
continue
val_array[val] += 1
                        count += 1
node.SetField("geoscape_count", count)
node.SetField("geoscape_missed", missed)
for key in self.landuse_classes:
if val_array.sum() < 1:
continue
node.SetField(key, float(val_array[self.landuse_classes[key]] / val_array.sum()))
self.node_view.sync()
self.node_view.set_next_by_index(0)
# gc.collect()
self.node_view.finalise()
print("finalise city")
|
iut-ibk/DynaMind-ToolBox | refs/heads/master | DynaMind-Performance-Assessment/scripts/DMPerformance/dm_wtp_extreme_heat.py | 1 | __author__ = 'christianurich'
from pydynamind import *
from scipy import random
# agent_wtp(Region_ID,agent,'wtp_stream_high1') = 10.99824 + 0.3368328*agent_wtp(Region_ID,agent,'Bedroom') - 0.0334691*agent_wtp(Region_ID,agent,'Age') - 0.0765181*agent_wtp(Region_ID,agent,'Education');
class WTP_Extream_Heat_AU(Module):
display_name = "WTP Extreme Heat (AU)"
group_name = "Performance Assessment"
def __init__(self):
Module.__init__(self)
self.setIsGDALModule(True)
self.households = ViewContainer("household", DM.COMPONENT, DM.READ)
self.households.addAttribute("age", DM.Attribute.INT, DM.READ)
self.households.addAttribute("bedrooms", DM.Attribute.INT, DM.READ)
self.households.addAttribute("education", DM.Attribute.STRING, DM.READ)
self.households.addAttribute("wtp_extreme_heat", DM.Attribute.DOUBLE, DM.WRITE)
self.registerViewContainers([self.households])
# self.education_levels = {}
# self.education_levels[""] = 1.
# self.education_levels["technical"] = 1.
# self.education_levels["other"] = 2.
# self.education_levels["secondary"] = 3.
# self.education_levels["tertiary"] = 4.
self.education_levels = {}
self.education_levels[""] = 0.
self.education_levels["technical"] = 0.
self.education_levels["other"] = 0.
self.education_levels["secondary"] = 1.
self.education_levels["tertiary"] = 1.
def run(self):
self.households.reset_reading()
counter = 0
for h in self.households:
            counter += 1
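            # Draw willingness-to-pay from a normal distribution
            # (mean 7.348656, sd 9.407043) and clip negative draws to zero.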
wtp = max(0.0, random.normal(7.348656, 9.407043))
h.SetField("wtp_extreme_heat", wtp)
if counter % 100000 == 0:
self.households.sync()
                self.households.set_next_by_index(counter)
self.households.finalise()
|
Hardtack/TypeConverter | refs/heads/master | setup.py | 1 | # -*- encoding: utf-8 -*-
"""TypeConverter
~~~~~~~~~~~~~~~~~
Quick and dirty python type converter.
"""
from setuptools import setup
from typeconverter import (__version__ as version, __license__ as license,
__author__ as author, __email__ as email)
setup(
name='TypeConverter',
version=version,
py_modules=['typeconverter'],
url='https://github.com/hardtack/typeconverter',
license=license,
author=author,
author_email=email,
description='Simple type converter',
long_description=__doc__,
)
|
djaodjin/djaodjin-survey | refs/heads/master | survey/views/edit.py | 1 | # Copyright (c) 2019, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.views.generic import DeleteView
from ..compat import reverse
from ..models import Sample
from ..mixins import SampleMixin
class QuizzSampleDeleteView(SampleMixin, DeleteView):
"""
    Delete a Sample of a quizz and redirect to its first question.
"""
model = Sample
def get_success_url(self):
return reverse('survey_sample_new',
kwargs={'campaign':self.object.campaign.slug})
def get_object(self, queryset=None):
#pylint:disable=not-a-mapping
return self.get_sample(**self.kwargs)
|
yuanagain/seniorthesis | refs/heads/master | venv/lib/python2.7/site-packages/scipy/weave/examples/cast_copy_transpose.py | 100 | """ Cast Copy Tranpose is used in numpy LinearAlgebra.py to convert
C ordered arrays to Fortran order arrays before calling Fortran
functions. A couple of C implementations are provided here that
show modest speed improvements. One is an "inplace" transpose that
does an in memory transpose of an arrays elements. This is the
fastest approach and is beneficial if you don't need to keep the
original array.
"""
# C:\home\ej\wrk\scipy\compiler\examples>python cast_copy_transpose.py
# Cast/Copy/Transposing (150,150)array 1 times
# speed in python: 0.870999932289
# speed in c: 0.25
# speed up: 3.48
# inplace transpose c: 0.129999995232
# speed up: 6.70
from __future__ import absolute_import, print_function
import numpy
from numpy import *
import sys
sys.path.insert(0,'..')
import scipy.weave.inline_tools as inline_tools
import scipy.weave.c_spec as c_spec
from scipy.weave.converters import blitz as cblitz
def _cast_copy_transpose(type,a_2d):
assert(len(shape(a_2d)) == 2)
new_array = zeros(shape(a_2d),type)
code = """
for(int i = 0; i < Na_2d[0]; i++)
for(int j = 0; j < Na_2d[1]; j++)
new_array(i,j) = a_2d(j,i);
"""
inline_tools.inline(code,['new_array','a_2d'],
type_converters=cblitz,
compiler='gcc',
verbose=1)
return new_array
def _cast_copy_transpose2(type,a_2d):
assert(len(shape(a_2d)) == 2)
new_array = zeros(shape(a_2d),type)
code = """
const int I = Na_2d[0];
const int J = Na_2d[1];
for(int i = 0; i < I; i++)
{
int new_off = i*J;
int old_off = i;
for(int j = 0; j < J; j++)
{
new_array[new_off++] = a_2d[old_off];
old_off += I;
}
}
"""
inline_tools.inline(code,['new_array','a_2d'],compiler='gcc',verbose=1)
return new_array
def _inplace_transpose(a_2d):
assert(len(shape(a_2d)) == 2)
numeric_type = c_spec.num_to_c_types[a_2d.dtype.char]
code = """
%s temp;
    // visit only j > i so each off-diagonal pair is swapped exactly once
    for(int i = 0; i < Na_2d[0]; i++)
        for(int j = i + 1; j < Na_2d[1]; j++)
{
temp = a_2d(i,j);
a_2d(i,j) = a_2d(j,i);
a_2d(j,i) = temp;
}
""" % numeric_type
inline_tools.inline(code,['a_2d'],
type_converters=cblitz,
compiler='gcc',
extra_compile_args=['-funroll-all-loops'],
verbose=2)
return a_2d
#assert(len(shape(a_2d)) == 2)
#type = a_2d.typecode()
#new_array = zeros(shape(a_2d),type)
##trans_a_2d = transpose(a_2d)
#numeric_type = c_spec.num_to_c_types[type]
#code = """
# for(int i = 0; i < Na_2d[0]; i++)
# for(int j = 0; j < Na_2d[1]; j++)
# new_array(i,j) = (%s) a_2d(j,i);
# """ % numeric_type
#inline_tools.inline(code,['new_array','a_2d'],
# type_converters = cblitz,
# compiler='gcc',
# verbose = 1)
#return new_array
def cast_copy_transpose(type,*arrays):
results = []
for a in arrays:
results.append(_cast_copy_transpose(type,a))
if len(results) == 1:
return results[0]
else:
return results
def cast_copy_transpose2(type,*arrays):
results = []
for a in arrays:
results.append(_cast_copy_transpose2(type,a))
if len(results) == 1:
return results[0]
else:
return results
def inplace_cast_copy_transpose(*arrays):
results = []
for a in arrays:
results.append(_inplace_transpose(a))
if len(results) == 1:
return results[0]
else:
return results
def _castCopyAndTranspose(type, *arrays):
cast_arrays = ()
import copy
for a in arrays:
if a.dtype == numpy.dtype(type):
cast_arrays = cast_arrays + (copy.copy(numpy.transpose(a)),)
else:
cast_arrays = cast_arrays + (copy.copy(
numpy.transpose(a).astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
import time
def compare(m,n):
a = ones((n,n),float64)
type = float32
print('Cast/Copy/Transposing (%d,%d)array %d times' % (n,n,m))
t1 = time.time()
for i in range(m):
        for j in range(n):
b = _castCopyAndTranspose(type,a)
t2 = time.time()
py = (t2-t1)
print(' speed in python:', (t2 - t1)/m)
# load into cache
b = cast_copy_transpose(type,a)
t1 = time.time()
for i in range(m):
        for j in range(n):
b = cast_copy_transpose(type,a)
t2 = time.time()
print(' speed in c (blitz):',(t2 - t1) / m)
print(' speed up (blitz): %3.2f' % (py/(t2-t1)))
# load into cache
b = cast_copy_transpose2(type,a)
t1 = time.time()
for i in range(m):
        for j in range(n):
b = cast_copy_transpose2(type,a)
t2 = time.time()
print(' speed in c (pointers):',(t2 - t1) / m)
print(' speed up (pointers): %3.2f' % (py/(t2-t1)))
# inplace tranpose
b = _inplace_transpose(a)
t1 = time.time()
for i in range(m):
        for j in range(n):
b = _inplace_transpose(a)
t2 = time.time()
print(' inplace transpose c:',(t2 - t1) / m)
print(' speed up: %3.2f' % (py/(t2-t1)))
if __name__ == "__main__":
m,n = 1,500
compare(m,n)
|
lunafeng/django | refs/heads/master | tests/inspectdb/models.py | 208 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class People(models.Model):
name = models.CharField(max_length=255)
parent = models.ForeignKey('self', models.CASCADE)
class Message(models.Model):
from_field = models.ForeignKey(People, models.CASCADE, db_column='from_id')
class PeopleData(models.Model):
people_pk = models.ForeignKey(People, models.CASCADE, primary_key=True)
ssn = models.CharField(max_length=11)
class PeopleMoreData(models.Model):
people_unique = models.ForeignKey(People, models.CASCADE, unique=True)
license = models.CharField(max_length=255)
class DigitsInColumnName(models.Model):
all_digits = models.CharField(max_length=11, db_column='123')
leading_digit = models.CharField(max_length=11, db_column='4extra')
leading_digits = models.CharField(max_length=11, db_column='45extra')
class SpecialName(models.Model):
field = models.IntegerField(db_column='field')
# Underscores
field_field_0 = models.IntegerField(db_column='Field_')
field_field_1 = models.IntegerField(db_column='Field__')
field_field_2 = models.IntegerField(db_column='__field')
# Other chars
prc_x = models.IntegerField(db_column='prc(%) x')
non_ascii = models.IntegerField(db_column='tamaño')
class Meta:
db_table = "inspectdb_special.table name"
class ColumnTypes(models.Model):
id = models.AutoField(primary_key=True)
big_int_field = models.BigIntegerField()
bool_field = models.BooleanField(default=False)
null_bool_field = models.NullBooleanField()
char_field = models.CharField(max_length=10)
null_char_field = models.CharField(max_length=10, blank=True, null=True)
comma_separated_int_field = models.CommaSeparatedIntegerField(max_length=99)
date_field = models.DateField()
date_time_field = models.DateTimeField()
decimal_field = models.DecimalField(max_digits=6, decimal_places=1)
email_field = models.EmailField()
file_field = models.FileField(upload_to="unused")
file_path_field = models.FilePathField()
float_field = models.FloatField()
int_field = models.IntegerField()
gen_ip_adress_field = models.GenericIPAddressField(protocol="ipv4")
pos_int_field = models.PositiveIntegerField()
pos_small_int_field = models.PositiveSmallIntegerField()
slug_field = models.SlugField()
small_int_field = models.SmallIntegerField()
text_field = models.TextField()
time_field = models.TimeField()
url_field = models.URLField()
class UniqueTogether(models.Model):
field1 = models.IntegerField()
field2 = models.CharField(max_length=10)
class Meta:
unique_together = ('field1', 'field2')
|
bob-the-hamster/commandergenius | refs/heads/sdl_android | project/jni/python/src/Demo/parser/test_parser.py | 41 | #! /usr/bin/env python
# (Force the script to use the latest build.)
#
# test_parser.py
import parser, traceback
_numFailed = 0
def testChunk(t, fileName):
global _numFailed
print '----', fileName,
try:
ast = parser.suite(t)
tup = parser.ast2tuple(ast)
# this discards the first AST; a huge memory savings when running
# against a large source file like Tkinter.py.
ast = None
new = parser.tuple2ast(tup)
except parser.ParserError, err:
print
print 'parser module raised exception on input file', fileName + ':'
traceback.print_exc()
_numFailed = _numFailed + 1
else:
if tup != parser.ast2tuple(new):
print
print 'parser module failed on input file', fileName
_numFailed = _numFailed + 1
else:
print 'o.k.'
def testFile(fileName):
t = open(fileName).read()
testChunk(t, fileName)
def test():
import sys
args = sys.argv[1:]
if not args:
import glob
args = glob.glob("*.py")
args.sort()
map(testFile, args)
sys.exit(_numFailed != 0)
if __name__ == '__main__':
test()
|
wildermason/external_skia | refs/heads/dark-5.1 | gm/rebaseline_server/compare_rendered_pictures_test.py | 67 | #!/usr/bin/python
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Test compare_rendered_pictures.py
TODO(epoger): Create a command to update the expected results (in
self._output_dir_expected) when appropriate. For now, you should:
1. examine the results in self._output_dir_actual and make sure they are ok
2. rm -rf self._output_dir_expected
3. mv self._output_dir_actual self._output_dir_expected
Although, if you're using an SVN checkout, this will blow away .svn directories
within self._output_dir_expected, which wouldn't be good...
"""
import os
import subprocess
import sys
# Imports from within Skia
import base_unittest
import compare_rendered_pictures
import results
import gm_json # must import results first, so that gm_json will be in sys.path
class CompareRenderedPicturesTest(base_unittest.TestCase):
def test_endToEnd(self):
"""Generate two sets of SKPs, run render_pictures over both, and compare
the results."""
self._generate_skps_and_run_render_pictures(
subdir='before_patch', skpdict={
'changed.skp': 200,
'unchanged.skp': 100,
'only-in-before.skp': 128,
})
self._generate_skps_and_run_render_pictures(
subdir='after_patch', skpdict={
'changed.skp': 201,
'unchanged.skp': 100,
'only-in-after.skp': 128,
})
results_obj = compare_rendered_pictures.RenderedPicturesComparisons(
actuals_root=self._temp_dir,
subdirs=('before_patch', 'after_patch'),
generated_images_root=self._temp_dir,
diff_base_url='/static/generated-images')
results_obj.get_timestamp = mock_get_timestamp
gm_json.WriteToFile(
results_obj.get_packaged_results_of_type(
results.KEY__HEADER__RESULTS_ALL),
os.path.join(self._output_dir_actual, 'compare_rendered_pictures.json'))
def _generate_skps_and_run_render_pictures(self, subdir, skpdict):
"""Generate SKPs and run render_pictures on them.
Args:
subdir: subdirectory (within self._temp_dir) to write all files into
skpdict: {skpname: redvalue} dictionary describing the SKP files to render
"""
out_path = os.path.join(self._temp_dir, subdir)
os.makedirs(out_path)
for skpname, redvalue in skpdict.iteritems():
self._run_skpmaker(
output_path=os.path.join(out_path, skpname), red=redvalue)
# TODO(epoger): Add --mode tile 256 256 --writeWholeImage to the unittest,
# and fix its result! (imageURLs within whole-image entries are wrong when
# I tried adding that)
binary = self.find_path_to_program('render_pictures')
return subprocess.check_output([
binary,
'--clone', '1',
'--config', '8888',
'-r', out_path,
'--writeChecksumBasedFilenames',
'--writeJsonSummaryPath', os.path.join(out_path, 'summary.json'),
'--writePath', out_path])
def _run_skpmaker(self, output_path, red=0, green=0, blue=0,
width=640, height=400):
"""Runs the skpmaker binary to generate SKP with known characteristics.
Args:
output_path: Filepath to write the SKP into.
red: Value of red color channel in image, 0-255.
green: Value of green color channel in image, 0-255.
blue: Value of blue color channel in image, 0-255.
width: Width of canvas to create.
height: Height of canvas to create.
"""
binary = self.find_path_to_program('skpmaker')
return subprocess.check_output([
binary,
'--red', str(red),
'--green', str(green),
'--blue', str(blue),
'--width', str(width),
'--height', str(height),
'--writePath', str(output_path)])
def mock_get_timestamp():
"""Mock version of BaseComparisons.get_timestamp() for testing."""
return 12345678
def main():
base_unittest.main(CompareRenderedPicturesTest)
if __name__ == '__main__':
main()
|
acsone/purchase-workflow | refs/heads/8.0 | purchase_supplier_rounding_method/models/account_invoice_line.py | 1 | # -*- coding: utf-8 -*-
# Copyright (C) 2017 - Today: GRAP (http://www.grap.coop)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, models
class AccountInvoiceLine(models.Model):
_inherit = 'account.invoice.line'
@api.one
def _compute_price(self):
"""Overwrite price subtotal computation if the partner has a
supplier rounding method set to 'Round Net Price'"""
invoice = self.invoice_id
if invoice and invoice.type in ['in_invoice', 'in_refund'] and\
invoice.partner_id.supplier_rounding_method\
== 'round_net_price':
price = round(
self.price_unit * (1 - (self.discount or 0.0) / 100.0),
self.env['decimal.precision'].precision_get('Account'))
taxes = self.invoice_line_tax_id.compute_all(
price, self.quantity, product=self.product_id,
partner=invoice.partner_id)
self.price_subtotal = taxes['total']
self.price_subtotal = invoice.currency_id.round(
self.price_subtotal)
else:
return super(AccountInvoiceLine, self)._compute_price()
|
okomestudio/moto | refs/heads/master | tests/test_kinesis/test_server.py | 33 | from __future__ import unicode_literals
import json
import sure # noqa
import moto.server as server
from moto import mock_kinesis
'''
Test the different server responses
'''
@mock_kinesis
def test_list_streams():
backend = server.create_backend_app("kinesis")
test_client = backend.test_client()
res = test_client.get('/?Action=ListStreams')
json_data = json.loads(res.data.decode("utf-8"))
json_data.should.equal({
"HasMoreStreams": False,
"StreamNames": [],
})
|
jemekite/p2pool-deepcoin | refs/heads/master | nattraverso/portmapper.py | 288 | """
Generic NAT Port mapping interface.
Example (an illustrative sketch; ``EchoFactory`` and the port number are
placeholders)::
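
    from twisted.internet import reactor
    from nattraverso.portmapper import get_port_mapper

    def on_mapped(mapping):
        extaddr, extport = mapping
        print "Mapped to %s:%d" % (extaddr, extport)

    def on_mapper(mapper):
        port = reactor.listenTCP(6881, EchoFactory())
        mapper.map(port).addCallback(on_mapped)

    get_port_mapper("TCP").addCallback(on_mapper)
    reactor.run()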
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{[email protected]<mailto:[email protected]>}
@version: 0.1.0
"""
__revision__ = "$id"
from twisted.internet.base import BasePort
# Public API
def get_port_mapper(proto="TCP"):
"""
Returns a L{NATMapper} instance, suited to map a port for
the given protocol. Defaults to TCP.
For the moment, only upnp mapper is available. It accepts both UDP and TCP.
@param proto: The protocol: 'TCP' or 'UDP'
@type proto: string
@return: A deferred called with a L{NATMapper} instance
@rtype: L{twisted.internet.defer.Deferred}
"""
import nattraverso.pynupnp
return nattraverso.pynupnp.get_port_mapper()
class NATMapper:
"""
Define methods to map port objects (as returned by twisted's listenXX).
This allows NAT to be traversed from incoming packets.
Currently the only implementation of this class is the UPnP Mapper, which
can map UDP and TCP ports, if an UPnP Device exists.
"""
def __init__(self):
raise NotImplementedError("Cannot instantiate the class")
def map(self, port):
"""
Create a mapping for the given twisted's port object.
The deferred will call back with a tuple (extaddr, extport):
- extaddr: The ip string of the external ip address of this host
- extport: the external port number used to map the given Port object
        When called multiple times with the same Port,
        the deferred fires with the existing mapping.
@param port: The port object to map
@type port: a L{twisted.internet.interfaces.IListeningPort} object
@return: A deferred called with the above defined tuple
@rtype: L{twisted.internet.defer.Deferred}
"""
raise NotImplementedError
def info(self, port):
"""
Returns the existing mapping for the given port object. That means map()
has to be called before.
        @param port: The port object to retrieve info from
@type port: a L{twisted.internet.interfaces.IListeningPort} object
@raise ValueError: When there is no such existing mapping
@return: a tuple (extaddress, extport).
@see: L{map() function<map>}
"""
raise NotImplementedError
def unmap(self, port):
"""
Remove an existing mapping for the given twisted's port object.
@param port: The port object to unmap
@type port: a L{twisted.internet.interfaces.IListeningPort} object
@return: A deferred called with None
@rtype: L{twisted.internet.defer.Deferred}
@raise ValueError: When there is no such existing mapping
"""
raise NotImplementedError
def get_port_mappings(self):
"""
        Returns a deferred that will be called with a dictionary of the
        existing mappings.
        The dictionary structure is the following:
            - Keys: tuple (protocol, external_port)
                - protocol is "TCP" or "UDP".
                - external_port is the external port number, as seen on the
                  WAN side.
            - Values: tuple (internal_ip, internal_port)
                - internal_ip is the LAN ip address of the host.
                - internal_port is the internal port number mapped
                  to external_port.
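        Example of a returned dictionary (values are illustrative)::
            {("TCP", 6881): ("192.168.1.10", 6881),
             ("UDP", 5000): ("192.168.1.10", 5001)}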
        @return: A deferred called with the above defined dictionary
@rtype: L{twisted.internet.defer.Deferred}
"""
raise NotImplementedError
def _check_valid_port(self, port):
"""Various Port object validity checks. Raise a ValueError."""
if not isinstance(port, BasePort):
raise ValueError("expected a Port, got %r"%(port))
if not port.connected:
raise ValueError("Port %r is not listening"%(port))
loc_addr = port.getHost()
if loc_addr.port == 0:
raise ValueError("Port %r has port number of 0"%(port))
|
jimmida/incubator-hawq | refs/heads/master | tools/bin/gppylib/util/san_utils.py | 30 | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
# Greenplum SAN related utility functions
import os
from gppylib.commands import unix
class SanCmds():
INQ=None
NAVISECCLI=None
POWERMT=None
STAT=None
GP_MOUNT_AGENT=None
MOUNT=None
def __init__(self):
        gphome = os.getenv('GPHOME', '') + '/bin/'
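        # findCmdInPath may raise if a command is missing; absent tools are
        # simply left as None.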
try:
self.GP_MOUNT_AGENT = unix.findCmdInPath('gp_mount_agent', [gphome], False)
except:
pass
try:
self.POWERMT = unix.findCmdInPath('powermt', printError=False)
except:
pass
try:
self.NAVISECCLI = unix.findCmdInPath('naviseccli', printError=False)
except:
pass
try:
self.STAT = unix.findCmdInPath('stat', printError=False)
except:
pass
try:
self.INQ = unix.findCmdInPath('inq', printError=False)
except:
pass
try:
self.MOUNT = unix.findCmdInPath('mount', printError=False)
except:
pass
SAN_CMDS=SanCmds()
|
julython/julython.org | refs/heads/master | july/blog/models.py | 1 |
from django.db import models
from django.db.models import permalink
from django.conf import settings
class Blog(models.Model):
title = models.CharField(max_length=100, unique=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
slug = models.SlugField(max_length=100, unique=True)
body = models.TextField()
posted = models.DateTimeField(db_index=True)
active = models.BooleanField(default=True)
category = models.ForeignKey('blog.Category')
def __unicode__(self):
return '%s' % self.title
@permalink
def get_absolute_url(self):
return ('view_blog_post', None, {'slug': self.slug})
class Category(models.Model):
title = models.CharField(max_length=100, db_index=True)
slug = models.SlugField(max_length=100, db_index=True)
def __unicode__(self):
return '%s' % self.title
@permalink
def get_absolute_url(self):
return ('view_blog_category', None, {'slug': self.slug})
|
abhishekjairath/codeyard | refs/heads/master | commit/lib/python2.7/site-packages/pip/_vendor/colorama/ansitowin32.py | 167 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll
if windll is not None:
winterm = WinTerm()
def is_a_tty(stream):
return hasattr(stream, 'isatty') and stream.isatty()
class StreamWrapper(object):
'''
Wraps a stream (such as stdout), acting as a transparent proxy for all
attribute access apart from method 'write()', which is delegated to our
Converter instance.
'''
def __init__(self, wrapped, converter):
# double-underscore everything to prevent clashes with names of
# attributes on the wrapped stream object.
self.__wrapped = wrapped
self.__convertor = converter
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def write(self, text):
self.__convertor.write(text)
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
ANSI_RE = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = sys.platform.startswith('win')
# should we strip ANSI sequences from our output?
if strip is None:
strip = on_windows
self.strip = strip
# should we should convert ANSI sequences into win32 calls?
if convert is None:
convert = on_windows and is_a_tty(wrapped)
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
}
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif is_a_tty(self.wrapped):
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
for match in self.ANSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
def convert_ansi(self, paramstring, command):
if self.convert:
params = self.extract_params(paramstring)
self.call_win32(command, params)
def extract_params(self, paramstring):
def split(paramstring):
for p in paramstring.split(';'):
if p != '':
yield int(p)
return tuple(split(paramstring))
def call_win32(self, command, params):
        if not params:
            params = (0,)
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
elif command in ('H', 'f'): # set cursor position
func = winterm.set_cursor_position
func(params, on_stderr=self.on_stderr)
        elif command == 'J':
func = winterm.erase_data
func(params, on_stderr=self.on_stderr)
elif command == 'A':
            if params == () or params is None:
num_rows = 1
else:
num_rows = params[0]
func = winterm.cursor_up
func(num_rows, on_stderr=self.on_stderr)
|
bdang2012/taiga-back | refs/heads/master | tests/__init__.py | 12133432 | |
Vixionar/django | refs/heads/master | tests/swappable_models/tests.py | 339 | from __future__ import unicode_literals
from swappable_models.models import Article
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.test import TestCase, override_settings
from django.utils.six import StringIO
class SwappableModelTests(TestCase):
available_apps = [
'swappable_models',
'django.contrib.auth',
'django.contrib.contenttypes',
]
@override_settings(TEST_ARTICLE_MODEL='swappable_models.AlternateArticle')
def test_generated_data(self):
"Permissions and content types are not created for a swapped model"
# Delete all permissions and content_types
Permission.objects.filter(content_type__app_label='swappable_models').delete()
ContentType.objects.filter(app_label='swappable_models').delete()
# Re-run migrate. This will re-build the permissions and content types.
new_io = StringIO()
management.call_command('migrate', interactive=False, stdout=new_io)
# Check that content types and permissions exist for the swapped model,
# but not for the swappable model.
apps_models = [(p.content_type.app_label, p.content_type.model)
for p in Permission.objects.all()]
self.assertIn(('swappable_models', 'alternatearticle'), apps_models)
self.assertNotIn(('swappable_models', 'article'), apps_models)
apps_models = [(ct.app_label, ct.model)
for ct in ContentType.objects.all()]
self.assertIn(('swappable_models', 'alternatearticle'), apps_models)
self.assertNotIn(('swappable_models', 'article'), apps_models)
@override_settings(TEST_ARTICLE_MODEL='swappable_models.article')
def test_case_insensitive(self):
"Model names are case insensitive. Check that model swapping honors this."
try:
Article.objects.all()
except AttributeError:
self.fail('Swappable model names should be case insensitive.')
self.assertIsNone(Article._meta.swapped)
|
amaas-fintech/amaas-core-sdk-python | refs/heads/master | tests/unit/csv_upload/assets/etf.py | 1 | import unittest
import csv
import random
from amaasutils.random_utils import random_string
from amaascore.csv_upload import Uploader
class ETFUploaderTest(unittest.TestCase):
def setUp(self):
self.longMessage = True # Print complete error message on failure
self.asset_manager_id = self.client_id = 1
self.csvfile = 'ETFUploaderTest.csv'
self.asset_ids = [random_string(8), random_string(8)]
with open(self.csvfile, 'r+', newline='') as readfile:
reader = csv.reader(readfile)
for row in reader:
header = row
break
with open(self.csvfile, 'w+', newline='') as writefile:
writer = csv.writer(writefile)
writer.writerow(header)
writer.writerow(['ExchangeTradedFund', self.asset_ids[0], '123', '123', '123', '123', 'Active', '123', '123', '123', 'USA', '123', 'USD', '09/01/01',
'12345', '54321', 'true', '1', 'true', '2'])
writer.writerow(['ExchangeTradedFund', self.asset_ids[1], '123', '123', '123', '123', 'Active', '123', '123', '123', 'USA', '123', 'USD', '09/01/01',
'12345', '54321', 'true', '1', 'true', '2'])
def tearDown(self):
pass
def test_ETFUploadDownload(self):
Uploader().upload(asset_manager_id=self.asset_manager_id, client_id=self.client_id, csvpath=self.csvfile)
Uploader().download(csvpath=self.csvfile, asset_manager_id=self.asset_manager_id, data_id_type='asset_id', data_id_list=self.asset_ids)
if __name__ == '__main__':
unittest.main() |
fkolacek/FIT-VUT | refs/heads/master | bp-revok/python/lib/python2.7/idlelib/MultiStatusBar.py | 90 | from Tkinter import *
class MultiStatusBar(Frame):
def __init__(self, master=None, **kw):
if master is None:
master = Tk()
Frame.__init__(self, master, **kw)
self.labels = {}
def set_label(self, name, text='', side=LEFT):
if name not in self.labels:
label = Label(self, bd=1, relief=SUNKEN, anchor=W)
label.pack(side=side)
self.labels[name] = label
else:
label = self.labels[name]
label.config(text=text)
def _test():
b = Frame()
c = Text(b)
c.pack(side=TOP)
a = MultiStatusBar(b)
a.set_label("one", "hello")
a.set_label("two", "world")
a.pack(side=BOTTOM, fill=X)
b.pack()
b.mainloop()
if __name__ == '__main__':
_test()
|
ryandougherty/mwa-capstone | refs/heads/heroku | MWA_Tools/build_lfiles/compare_compress.py | 1 | #!/usr/bin/python
"""A tool for comparing raw MWA data before and after compression.
Randall Wayth. May 2014.
"""
import sys,pyfits,numpy
def compare_compress(uncomp_filename, compressed_filename):
"""
Compare uncompressed and compressed data and make a report of maximum abs
and relative difference.
"""
hdulist_u = pyfits.open(uncomp_filename)
hdulist_c = pyfits.open(compressed_filename)
# first sanity check: number of HDUs
assert len(hdulist_u) == len(hdulist_c), "Mismatch in number of HDUs"
maxdiffs=[]
reldiffs=[]
# loop through each HDU. Compare data and collect stats on max abs and relative difference
for i in range(len(hdulist_u)):
d_c = hdulist_c[i].data
d_u = hdulist_u[i].data
if d_u is None: continue
assert d_u.shape == d_c.shape, "Mismatch in shape at HDU index $d" % (i)
diff = numpy.abs(d_u - d_c).flatten()
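        # Relative differences are only meaningful away from zero, so restrict
        # them to elements whose magnitude exceeds 0.1.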
reldiffnonzeroind = numpy.flatnonzero(numpy.fabs(d_u) > 1e-1)
reldiff = diff[reldiffnonzeroind] / numpy.abs(d_u.flatten()[reldiffnonzeroind])
p = numpy.argmax(diff)
prel = numpy.argmax(reldiff)
maxdiffs.append(diff[p])
reldiffs.append(reldiff[prel])
print "HDU %d. Max diff: %f. Rel diff at max: %g" % (i,maxdiffs[-1],reldiffs[-1])
# report:
print "Largest abs diff: "+str(numpy.max(maxdiffs))
print "Largest rel diff: "+str(numpy.max(reldiffs))
def usage():
print >> sys.stderr, "Usage:"
print >> sys.stderr, "%s uncompressed_filename compressed_filename" % (sys.argv[0])
sys.exit(0)
# execute a series of tests if invoked from the command line
if __name__ == "__main__":
if len(sys.argv) < 3: usage()
compare_compress(sys.argv[1],sys.argv[2])
|
ftomassetti/intellij-community | refs/heads/master | python/testData/copyPaste/IndentIncrease.dst.py | 83 | class A:
def bar(self):
pass
<caret>
def quux(self):
pass |
abaditsegay/arangodb | refs/heads/devel | 3rdParty/V8-4.3.61/third_party/python_26/Tools/scripts/findnocoding.py | 64 | #!/usr/bin/env python
"""List all those Python files that require a coding directive
Usage: nocoding.py dir1 [dir2...]
"""
__author__ = "Oleg Broytmann, Georg Brandl"
import sys, os, re, getopt
# our pysource module finds Python source files
try:
import pysource
except ImportError:
# emulate the module with a simple os.walk
class pysource:
has_python_ext = looks_like_python = can_be_compiled = None
def walk_python_files(self, paths, *args, **kwargs):
for path in paths:
if os.path.isfile(path):
                    if path.endswith(".py"):
                        yield path
elif os.path.isdir(path):
for root, dirs, files in os.walk(path):
for filename in files:
if filename.endswith(".py"):
yield os.path.join(root, filename)
pysource = pysource()
print >>sys.stderr, ("The pysource module is not available; "
"no sophisticated Python source file search will be done.")
decl_re = re.compile(r"coding[=:]\s*([-\w.]+)")
def get_declaration(line):
match = decl_re.search(line)
if match:
return match.group(1)
return ''
def has_correct_encoding(text, codec):
try:
unicode(text, codec)
except UnicodeDecodeError:
return False
else:
return True
def needs_declaration(fullpath):
try:
infile = open(fullpath, 'rU')
except IOError: # Oops, the file was removed - ignore it
return None
line1 = infile.readline()
line2 = infile.readline()
if get_declaration(line1) or get_declaration(line2):
# the file does have an encoding declaration, so trust it
infile.close()
return False
# check the whole file for non-ASCII characters
rest = infile.read()
infile.close()
if has_correct_encoding(line1+line2+rest, "ascii"):
return False
return True
usage = """Usage: %s [-cd] paths...
-c: recognize Python source files trying to compile them
-d: debug output""" % sys.argv[0]
try:
opts, args = getopt.getopt(sys.argv[1:], 'cd')
except getopt.error, msg:
print >>sys.stderr, msg
print >>sys.stderr, usage
sys.exit(1)
is_python = pysource.looks_like_python
debug = False
for o, a in opts:
if o == '-c':
is_python = pysource.can_be_compiled
elif o == '-d':
debug = True
if not args:
print >>sys.stderr, usage
sys.exit(1)
for fullpath in pysource.walk_python_files(args, is_python):
if debug:
print "Testing for coding: %s" % fullpath
result = needs_declaration(fullpath)
if result:
print fullpath
|
PythonNut/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/wptserve/tests/functional/base.py | 293 | import base64
import logging
import os
import unittest
import urllib
import urllib2
import urlparse
import wptserve
logging.basicConfig()
here = os.path.split(__file__)[0]
doc_root = os.path.join(here, "docroot")
class Request(urllib2.Request):
def __init__(self, *args, **kwargs):
urllib2.Request.__init__(self, *args, **kwargs)
self.method = "GET"
def get_method(self):
return self.method
def add_data(self, data):
if hasattr(data, "iteritems"):
data = urllib.urlencode(data)
print data
self.add_header("Content-Length", str(len(data)))
urllib2.Request.add_data(self, data)
class TestUsingServer(unittest.TestCase):
def setUp(self):
self.server = wptserve.server.WebTestHttpd(host="localhost",
port=0,
use_ssl=False,
certificate=None,
doc_root=doc_root)
self.server.start(False)
def tearDown(self):
self.server.stop()
def abs_url(self, path, query=None):
return urlparse.urlunsplit(("http", "%s:%i" % (self.server.host, self.server.port), path, query, None))
def request(self, path, query=None, method="GET", headers=None, body=None, auth=None):
req = Request(self.abs_url(path, query))
req.method = method
if headers is None:
headers = {}
for name, value in headers.iteritems():
req.add_header(name, value)
if body is not None:
req.add_data(body)
if auth is not None:
req.add_header("Authorization", "Basic %s" % base64.encodestring('%s:%s' % auth))
return urllib2.urlopen(req)
|
cpcloud/PyTables | refs/heads/develop | tables/node.py | 1 | # -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: 2005-02-11
# Author: Ivan Vilata i Balaguer - [email protected]
#
# $Id$
#
########################################################################
"""PyTables nodes"""
import warnings
from tables.registry import classNameDict, classIdDict
from tables.exceptions import (ClosedNodeError, NodeError, UndoRedoWarning,
PerformanceWarning)
from tables.path import joinPath, splitPath, isVisiblePath
from tables.utils import lazyattr
from tables.undoredo import moveToShadow
from tables.attributeset import AttributeSet, NotLoggedAttributeSet
__docformat__ = 'reStructuredText'
"""The format of documentation strings in this module."""
def _closedrepr(oldmethod):
"""Decorate string representation method to handle closed nodes.
If the node is closed, a string like this is returned::
<closed MODULE.CLASS at ADDRESS>
instead of calling `oldmethod` and returning its result.
"""
def newmethod(self):
if not self._v_isopen:
cmod = self.__class__.__module__
cname = self.__class__.__name__
addr = hex(id(self))
return '<closed %s.%s at %s>' % (cmod, cname, addr)
return oldmethod(self)
newmethod.__name__ = oldmethod.__name__
newmethod.__doc__ = oldmethod.__doc__
return newmethod
class MetaNode(type):
"""Node metaclass.
This metaclass ensures that their instance classes get registered
into several dictionaries (namely the `tables.utils.classNameDict`
class name dictionary and the `tables.utils.classIdDict` class
identifier dictionary).
It also adds sanity checks to some methods:
* Check that the node is open when calling string representation
and provide a default string if so.
"""
def __new__(class_, name, bases, dict_):
# Add default behaviour for representing closed nodes.
for mname in ['__str__', '__repr__']:
if mname in dict_:
dict_[mname] = _closedrepr(dict_[mname])
return type.__new__(class_, name, bases, dict_)
def __init__(class_, name, bases, dict_):
super(MetaNode, class_).__init__(name, bases, dict_)
# Always register into class name dictionary.
classNameDict[class_.__name__] = class_
# Register into class identifier dictionary only if the class
# has an identifier and it is different from its parents'.
cid = getattr(class_, '_c_classId', None)
if cid is not None:
for base in bases:
pcid = getattr(base, '_c_classId', None)
if pcid == cid:
break
else:
classIdDict[cid] = class_
class Node(object):
"""Abstract base class for all PyTables nodes.
This is the base class for *all* nodes in a PyTables hierarchy. It is an
abstract class, i.e. it may not be directly instantiated; however, every
node in the hierarchy is an instance of this class.
A PyTables node is always hosted in a PyTables *file*, under a *parent
group*, at a certain *depth* in the node hierarchy. A node knows its own
*name* in the parent group and its own *path name* in the file.
All the previous information is location-dependent, i.e. it may change when
moving or renaming a node in the hierarchy. A node also has
location-independent information, such as its *HDF5 object identifier* and
its *attribute set*.
This class gathers the operations and attributes (both location-dependent
and independent) which are common to all PyTables nodes, whatever their
type is. Nonetheless, due to natural naming restrictions, the names of all
of these members start with a reserved prefix (see the Group class
in :ref:`GroupClassDescr`).
Sub-classes with no children (e.g. *leaf nodes*) may define new methods,
attributes and properties to avoid natural naming restrictions. For
instance, _v_attrs may be shortened to attrs and _f_rename to
rename. However, the original methods and attributes should still be
available.
.. rubric:: Node attributes
.. attribute:: _v_depth
The depth of this node in the tree (an non-negative integer value).
.. attribute:: _v_file
The hosting File instance (see :ref:`FileClassDescr`).
.. attribute:: _v_name
The name of this node in its parent group (a string).
.. attribute:: _v_pathname
The path of this node in the tree (a string).
    .. attribute:: _v_objectID
A node identifier (may change from run to run).
"""
# This makes this class and all derived subclasses be handled by MetaNode.
__metaclass__ = MetaNode
# By default, attributes accept Undo/Redo.
_AttributeSet = AttributeSet
# <properties>
# `_v_parent` is accessed via its file to avoid upwards references.
def _g_getparent(self):
(parentPath, nodeName) = splitPath(self._v_pathname)
return self._v_file._getNode(parentPath)
_v_parent = property(
_g_getparent, None, None, ("The parent :class:`Group` instance"))
# '_v_attrs' is defined as a lazy read-only attribute.
# This saves 0.7s/3.8s.
@lazyattr
def _v_attrs(self):
"""The associated `AttributeSet` instance.
See Also
--------
tables.attributeset.AttributeSet : container for the HDF5 attributes
"""
return self._AttributeSet(self)
# '_v_title' is a direct read-write shorthand for the 'TITLE' attribute
# with the empty string as a default value.
def _g_gettitle (self):
if hasattr(self._v_attrs, 'TITLE'):
return self._v_attrs.TITLE
else:
return ''
def _g_settitle (self, title):
self._v_attrs.TITLE = title
_v_title = property(_g_gettitle, _g_settitle, None,
("A description of this node. A shorthand for "
"TITLE attribute."))
# </properties>
# This may be looked up by ``__del__`` when ``__init__`` doesn't get
# to be called. See ticket #144 for more info.
_v_isopen = False
"""Whehter this node is open or not."""
# The ``_log`` argument is only meant to be used by ``_g_copyAsChild()``
# to avoid logging the creation of children nodes of a copied sub-tree.
def __init__(self, parentNode, name, _log=True):
# Remember to assign these values in the root group constructor
# as it does not use this method implementation!
self._v_file = None
"""The hosting File instance (see :ref:`FileClassDescr`)."""
self._v_isopen = False
"""Whether this node is open or not."""
self._v_pathname = None
"""The path of this node in the tree (a string)."""
self._v_name = None
"""The name of this node in its parent group (a string)."""
self._v_depth = None
"""The depth of this node in the tree (an non-negative integer value)."""
self._v_maxTreeDepth = parentNode._v_file.params['MAX_TREE_DEPTH']
"""Maximum tree depth before warning the user."""
self._v__deleting = False
"""Is the node being deleted?"""
self._v_objectID = None
"""A node identifier (may change from run to run)."""
validate = new = self._v_new # set by subclass constructor
# Is the parent node a group? Is it open?
self._g_checkGroup(parentNode)
parentNode._g_checkOpen()
file_ = parentNode._v_file
# Will the file be able to host a new node?
if new:
file_._checkWritable()
# Bind to the parent node and set location-dependent information.
if new:
# Only new nodes need to be referenced.
# Opened nodes are already known by their parent group.
parentNode._g_refNode(self, name, validate)
self._g_setLocation(parentNode, name)
try:
# hdf5Extension operations:
# Update node attributes.
self._g_new(parentNode, name, init=True)
# Create or open the node and get its object ID.
if new:
self._v_objectID = self._g_create()
else:
self._v_objectID = self._g_open()
# The node *has* been created, log that.
if new and _log and file_.isUndoEnabled():
self._g_logCreate()
# This allows extra operations after creating the node.
self._g_postInitHook()
except:
# If anything happens, the node must be closed
# to undo every possible registration made so far.
# We do *not* rely on ``__del__()`` doing it later,
# since it might never be called anyway.
self._f_close()
raise
def _g_logCreate(self):
self._v_file._log('CREATE', self._v_pathname)
def __del__(self):
# Closed `Node` instances can not be killed and revived.
# Instead, accessing a closed and deleted (from memory, not
# disk) one yields a *new*, open `Node` instance. This is
# because of two reasons:
#
# 1. Predictability. After closing a `Node` and deleting it,
# only one thing can happen when accessing it again: a new,
# open `Node` instance is returned. If closed nodes could be
# revived, one could get either a closed or an open `Node`.
#
# 2. Ease of use. If the user wants to access a closed node
# again, the only condition would be that no references to
# the `Node` instance were left. If closed nodes could be
# revived, the user would also need to force the closed
# `Node` out of memory, which is not a trivial task.
#
if not self._v_isopen:
return # the node is already closed or not initialized
# If we get here, the `Node` is still open.
file_ = self._v_file
if self._v_pathname in file_._aliveNodes:
# If the node is alive, kill it (to save it).
file_._killNode(self)
elif file_._aliveNodes.hasdeadnodes:
# The node is already dead and there are no references to it,
# so follow the usual deletion procedure.
# This means closing the (still open) node.
# `self._v__deleting` is asserted so that the node
# does not try to unreference itself again from the file.
self._v__deleting = True
self._f_close()
def _g_preKillHook(self):
"""Code to be called before killing the node."""
pass
def _g_postReviveHook(self):
"""Code to be called after reviving the node."""
pass
def _g_create(self):
"""Create a new HDF5 node and return its object identifier."""
raise NotImplementedError
def _g_open(self):
"""Open an existing HDF5 node and return its object identifier."""
raise NotImplementedError
def _g_checkOpen(self):
"""Check that the node is open.
If the node is closed, a `ClosedNodeError` is raised.
"""
if not self._v_isopen:
raise ClosedNodeError("the node object is closed")
assert self._v_file.isopen, "found an open node in a closed file"
def _g_setLocation(self, parentNode, name):
"""Set location-dependent attributes.
Sets the location-dependent attributes of this node to reflect
that it is placed under the specified `parentNode`, with the
specified `name`.
This also triggers the insertion of file references to this
node. If the maximum recommended tree depth is exceeded, a
`PerformanceWarning` is issued.
"""
file_ = parentNode._v_file
parentDepth = parentNode._v_depth
self._v_file = file_
self._v_isopen = True
rootUEP = file_.rootUEP
if name.startswith(rootUEP):
# This has been called from File._getNode()
assert parentDepth == 0
if rootUEP == "/":
self._v_pathname = name
else:
self._v_pathname = name[len(rootUEP):]
_, self._v_name = splitPath(name)
self._v_depth = name.count("/") - rootUEP.count("/") + 1
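            # An illustrative case (assuming the default rootUEP of '/'):
            # for name == '/a/b', _v_pathname is '/a/b', _v_name is 'b'
            # and _v_depth is 2 ('/a/b'.count('/') - '/'.count('/') + 1).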
else:
            # If we enter here, it is because this has been called from elsewhere.
self._v_name = name
self._v_pathname = joinPath(parentNode._v_pathname, name)
self._v_depth = parentDepth + 1
# Check if the node is too deep in the tree.
if parentDepth >= self._v_maxTreeDepth:
warnings.warn("""\
node ``%s`` is exceeding the recommended maximum depth (%d);\
be ready to see PyTables asking for *lots* of memory and possibly slow I/O"""
% (self._v_pathname, self._v_maxTreeDepth),
PerformanceWarning)
file_._refNode(self, self._v_pathname)
def _g_updateLocation(self, newParentPath):
"""Update location-dependent attributes.
Updates location data when an ancestor node has changed its
location in the hierarchy to `newParentPath`. In fact, this
method is expected to be called by an ancestor of this node.
This also triggers the update of file references to this node.
        If the maximum recommended tree depth is exceeded, a
        `PerformanceWarning` is issued; this warning is guaranteed to be
        issued only once.
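
        For instance, if this node is ``/a/x`` and its parent ``/a`` is
        moved to ``/b``, this method is called with `newParentPath` set
        to ``/b``, and this node's pathname becomes ``/b/x``.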
"""
oldPath = self._v_pathname
newPath = joinPath(newParentPath, self._v_name)
newDepth = newPath.count('/')
self._v_pathname = newPath
self._v_depth = newDepth
# Check if the node is too deep in the tree.
if newDepth > self._v_maxTreeDepth:
warnings.warn("""\
moved descendant node is exceeding the recommended maximum depth (%d);\
be ready to see PyTables asking for *lots* of memory and possibly slow I/O"""
% (self._v_maxTreeDepth,), PerformanceWarning)
file_ = self._v_file
file_._unrefNode(oldPath)
file_._refNode(self, newPath)
# Tell dependent objects about the new location of this node.
self._g_updateDependent()
def _g_delLocation(self):
"""Clear location-dependent attributes.
This also triggers the removal of file references to this node.
"""
file_ = self._v_file
pathname = self._v_pathname
self._v_file = None
self._v_isopen = False
self._v_pathname = None
self._v_name = None
self._v_depth = None
# If the node object is being deleted,
# it has already been unreferenced from the file.
if not self._v__deleting:
file_._unrefNode(pathname)
def _g_postInitHook(self):
"""Code to be run after node creation and before creation logging."""
pass
def _g_updateDependent(self):
"""Update dependent objects after a location change.
All dependent objects (but not nodes!) referencing this node
must be updated here.
"""
if '_v_attrs' in self.__dict__:
self._v_attrs._g_updateNodeLocation(self)
def _f_close(self):
"""Close this node in the tree.
This releases all resources held by the node, so it should not
be used again. On nodes with data, it may be flushed to disk.
You should not need to close nodes manually because they are
automatically opened/closed when they are loaded/evicted from
the integrated LRU cache.
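
        A hypothetical session (``node`` is assumed to be an open node)::

            >>> node._f_close()
            >>> node._f_isVisible()
            Traceback (most recent call last):
              ...
            ClosedNodeError: the node object is closed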
"""
# After calling ``_f_close()``, two conditions are met:
#
# 1. The node object is detached from the tree.
# 2. *Every* attribute of the node is removed.
#
# Thus, cleanup operations used in ``_f_close()`` in sub-classes
# must be run *before* calling the method in the superclass.
if not self._v_isopen:
return # the node is already closed
myDict = self.__dict__
# Close the associated `AttributeSet`
# only if it has already been placed in the object's dictionary.
if '_v_attrs' in myDict:
self._v_attrs._g_close()
# Detach the node from the tree if necessary.
self._g_delLocation()
# Finally, clear all remaining attributes from the object.
myDict.clear()
# Just add a final flag to signal that the node is closed:
self._v_isopen = False
def _g_remove(self, recursive, force):
"""Remove this node from the hierarchy.
If the node has children, recursive removal must be stated by
giving `recursive` a true value; otherwise, a `NodeError` will
be raised.
        If `force` is set to true, the node will be removed no matter
        whether it has children or not (useful for deleting hard links).
It does not log the change.
"""
# Remove the node from the PyTables hierarchy.
parent = self._v_parent
parent._g_unrefNode(self._v_name)
# Close the node itself.
self._f_close()
# hdf5Extension operations:
# Remove the node from the HDF5 hierarchy.
self._g_delete(parent)
def _f_remove(self, recursive=False, force=False):
"""Remove this node from the hierarchy.
If the node has children, recursive removal must be stated by giving
recursive a true value; otherwise, a NodeError will be raised.
If the node is a link to a Group object, and you are sure that you want
to delete it, you can do this by setting the force flag to true.
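
        For example (a hypothetical session; ``h5file`` is assumed to be
        an open, writable file with a non-empty group ``/g``, on which a
        non-recursive removal would raise a NodeError)::

            >>> grp = h5file.getNode('/g')
            >>> grp._f_remove(recursive=True)  # also removes all children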
"""
self._g_checkOpen()
file_ = self._v_file
file_._checkWritable()
if file_.isUndoEnabled():
self._g_removeAndLog(recursive, force)
else:
self._g_remove(recursive, force)
def _g_removeAndLog(self, recursive, force):
file_ = self._v_file
oldPathname = self._v_pathname
# Log *before* moving to use the right shadow name.
file_._log('REMOVE', oldPathname)
moveToShadow(file_, oldPathname)
def _g_move(self, newParent, newName):
"""Move this node in the hierarchy.
Moves the node into the given `newParent`, with the given
`newName`.
It does not log the change.
"""
oldParent = self._v_parent
oldName = self._v_name
oldPathname = self._v_pathname # to move the HDF5 node
# Try to insert the node into the new parent.
newParent._g_refNode(self, newName)
        # Remove the node from the old parent.
oldParent._g_unrefNode(oldName)
# Remove location information for this node.
self._g_delLocation()
# Set new location information for this node.
self._g_setLocation(newParent, newName)
# hdf5Extension operations:
# Update node attributes.
self._g_new(newParent, self._v_name, init=False)
# Move the node.
#self._v_parent._g_moveNode(oldPathname, self._v_pathname)
self._v_parent._g_moveNode(oldParent._v_objectID, oldName,
newParent._v_objectID, newName,
oldPathname, self._v_pathname)
# Tell dependent objects about the new location of this node.
self._g_updateDependent()
def _f_rename(self, newname, overwrite=False):
"""Rename this node in place.
Changes the name of a node to *newname* (a string). If a node with the
same newname already exists and overwrite is true, recursively remove
it before renaming.
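
        For example (hypothetical; ``node`` is assumed to live in a
        writable file)::

            >>> node._f_rename('newname')
            >>> node._v_name
            'newname'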
"""
self._f_move(newname=newname, overwrite=overwrite)
def _f_move( self, newparent=None, newname=None,
overwrite=False, createparents=False ):
"""Move or rename this node.
Moves a node into a new parent group, or changes the name of the
node. newparent can be a Group object (see :ref:`GroupClassDescr`) or a
pathname in string form. If it is not specified or None, the current
parent group is chosen as the new parent. newname must be a string
with a new name. If it is not specified or None, the current name is
        chosen as the new name. If createparents is true, any groups
        needed for the given new parent group path to exist will be
        created.
        Moving a node across databases is not allowed, nor is moving a
        node *into* itself. These result in a NodeError. However, moving a node
*over* itself is allowed and simply does nothing. Moving over another
existing node is similarly not allowed, unless the optional overwrite
argument is true, in which case that node is recursively removed before
moving.
Usually, only the first argument will be used, effectively moving the
node to a new location without changing its name. Using only the
second argument is equivalent to renaming the node in place.
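
        For example (a hypothetical session; ``h5file`` is assumed to be
        an open, writable file with groups ``/a`` and ``/b`` and an
        array ``/a/arr``)::

            >>> arr = h5file.getNode('/a/arr')
            >>> arr._f_move('/b')            # move, keeping the name
            >>> arr._f_move(newname='arr2')  # rename in place
            >>> arr._v_pathname
            '/b/arr2'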
"""
self._g_checkOpen()
file_ = self._v_file
oldParent = self._v_parent
oldName = self._v_name
# Set default arguments.
if newparent is None and newname is None:
raise NodeError( "you should specify at least "
"a ``newparent`` or a ``newname`` parameter" )
if newparent is None:
newparent = oldParent
if newname is None:
newname = oldName
# Get destination location.
if hasattr(newparent, '_v_file'): # from node
newfile = newparent._v_file
newpath = newparent._v_pathname
elif hasattr(newparent, 'startswith'): # from path
newfile = file_
newpath = newparent
else:
raise TypeError( "new parent is not a node nor a path: %r"
% (newparent,) )
# Validity checks on arguments.
# Is it in the same file?
if newfile is not file_:
raise NodeError( "nodes can not be moved across databases; "
"please make a copy of the node" )
# The movement always fails if the hosting file can not be modified.
file_._checkWritable()
# Moving over itself?
oldPath = oldParent._v_pathname
if newpath == oldPath and newname == oldName:
# This is equivalent to renaming the node to its current name,
# and it does not change the referenced object,
# so it is an allowed no-op.
return
# Moving into itself?
self._g_checkNotContains(newpath)
# Note that the previous checks allow us to go ahead and create
# the parent groups if `createparents` is true. `newparent` is
# used instead of `newpath` to avoid accepting `Node` objects
# when `createparents` is true.
newparent = file_._getOrCreatePath(newparent, createparents)
self._g_checkGroup(newparent) # Is it a group?
# Moving over an existing node?
self._g_maybeRemove(newparent, newname, overwrite)
# Move the node.
oldPathname = self._v_pathname
self._g_move(newparent, newname)
# Log the change.
if file_.isUndoEnabled():
self._g_logMove(oldPathname)
def _g_logMove(self, oldPathname):
self._v_file._log('MOVE', oldPathname, self._v_pathname)
def _g_copy(self, newParent, newName, recursive, _log=True, **kwargs):
"""Copy this node and return the new one.
Creates and returns a copy of the node in the given `newParent`,
with the given `newName`. If `recursive` copy is stated, all
        descendants are copied as well. Additional keyword arguments may
affect the way that the copy is made. Unknown arguments must be
ignored. On recursive copies, all keyword arguments must be
passed on to the children invocation of this method.
If `_log` is false, the change is not logged. This is *only*
intended to be used by ``_g_copyAsChild()`` as a means of
optimising sub-tree copies.
"""
raise NotImplementedError
def _g_copyAsChild(self, newParent, **kwargs):
"""Copy this node as a child of another group.
Copies just this node into `newParent`, not recursing children
nor overwriting nodes nor logging the copy. This is intended to
be used when copying whole sub-trees.
"""
return self._g_copy( newParent, self._v_name,
recursive=False, _log=False, **kwargs )
def _f_copy(self, newparent=None, newname=None,
overwrite=False, recursive=False, createparents=False,
**kwargs):
"""Copy this node and return the new node.
Creates and returns a copy of the node, maybe in a different place in
the hierarchy. newparent can be a Group object (see
:ref:`GroupClassDescr`) or a pathname in string form. If it is not
specified or None, the current parent group is chosen as the new
parent. newname must be a string with a new name. If it is not
specified or None, the current name is chosen as the new name. If
recursive copy is stated, all descendants are copied as well. If
        createparents is true, any groups needed for the given new parent
        group path to exist will be created.
Copying a node across databases is supported but can not be
        undone. Copying a node over itself is not allowed, nor is
recursively copying a node into itself. These result in a
NodeError. Copying over another existing node is similarly not allowed,
unless the optional overwrite argument is true, in which case that node
is recursively removed before copying.
Additional keyword arguments may be passed to customize the copying
process. For instance, title and filters may be changed, user
attributes may be or may not be copied, data may be sub-sampled, stats
may be collected, etc. See the documentation for the particular node
type.
Using only the first argument is equivalent to copying the node to a
new location without changing its name. Using only the second argument
is equivalent to making a copy of the node in the same group.
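
        For example (a hypothetical session; ``h5file`` is assumed to be
        an open, writable file containing an array ``/a/arr``)::

            >>> arr = h5file.getNode('/a/arr')
            >>> arr2 = arr._f_copy('/b', 'arr2', createparents=True)
            >>> arr2._v_pathname
            '/b/arr2'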
"""
self._g_checkOpen()
srcFile = self._v_file
srcParent = self._v_parent
srcName = self._v_name
dstParent = newparent
dstName = newname
# Set default arguments.
if dstParent is None and dstName is None:
raise NodeError( "you should specify at least "
"a ``newparent`` or a ``newname`` parameter" )
if dstParent is None:
dstParent = srcParent
if dstName is None:
dstName = srcName
# Get destination location.
if hasattr(dstParent, '_v_file'): # from node
dstFile = dstParent._v_file
dstPath = dstParent._v_pathname
elif hasattr(dstParent, 'startswith'): # from path
dstFile = srcFile
dstPath = dstParent
else:
raise TypeError( "new parent is not a node nor a path: %r"
% (dstParent,) )
# Validity checks on arguments.
if dstFile is srcFile:
# Copying over itself?
srcPath = srcParent._v_pathname
if dstPath == srcPath and dstName == srcName:
raise NodeError(
"source and destination nodes are the same node: ``%s``"
% self._v_pathname )
# Recursively copying into itself?
if recursive:
self._g_checkNotContains(dstPath)
# Note that the previous checks allow us to go ahead and create
# the parent groups if `createparents` is true. `dstParent` is
# used instead of `dstPath` because it may be in other file, and
# to avoid accepting `Node` objects when `createparents` is
# true.
dstParent = srcFile._getOrCreatePath(dstParent, createparents)
self._g_checkGroup(dstParent) # Is it a group?
# Copying to another file with undo enabled?
if dstFile is not srcFile and srcFile.isUndoEnabled():
warnings.warn( "copying across databases can not be undone "
"nor redone from this database",
UndoRedoWarning )
# Copying over an existing node?
self._g_maybeRemove(dstParent, dstName, overwrite)
# Copy the node.
# The constructor of the new node takes care of logging.
return self._g_copy(dstParent, dstName, recursive, **kwargs)
def _f_isVisible(self):
"""Is this node visible?"""
self._g_checkOpen()
return isVisiblePath(self._v_pathname)
def _g_checkGroup(self, node):
# Node must be defined in order to define a Group.
# However, we need to know Group here.
# Using classNameDict avoids a circular import.
if not isinstance(node, classNameDict['Node']):
raise TypeError("new parent is not a registered node: %s"
% node._v_pathname)
if not isinstance(node, classNameDict['Group']):
raise TypeError("new parent node ``%s`` is not a group"
% node._v_pathname)
def _g_checkNotContains(self, pathname):
# The not-a-TARDIS test. ;)
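        # For example, moving or copying '/a' into '/a/b' must fail,
        # since '/a/b'.startswith('/a' + '/') holds; and the root group
        # '/' contains every node, so it can never be moved or copied
        # into anything.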
mypathname = self._v_pathname
if ( mypathname == '/' # all nodes fall below the root group
or pathname == mypathname
or pathname.startswith(mypathname + '/') ):
raise NodeError(
"can not move or recursively copy node ``%s`` into itself"
% mypathname )
def _g_maybeRemove(self, parent, name, overwrite):
if name in parent:
if not overwrite:
raise NodeError("""\
destination group ``%s`` already has a node named ``%s``; \
you may want to use the ``overwrite`` argument""" % (parent._v_pathname, name))
parent._f_getChild(name)._f_remove(True)
def _g_checkName(self, name):
"""Check validity of name for this particular kind of node.
This is invoked once the standard HDF5 and natural naming checks
have successfully passed.
"""
if name.startswith('_i_'):
# This is reserved for table index groups.
raise ValueError(
"node name starts with reserved prefix ``_i_``: %s" % name)
# <attribute handling>
def _f_getAttr(self, name):
"""Get a PyTables attribute from this node.
If the named attribute does not exist, an AttributeError is raised.
"""
return getattr(self._v_attrs, name)
def _f_setAttr(self, name, value):
"""Set a PyTables attribute for this node.
If the node already has a large number of attributes, a
PerformanceWarning is issued.
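
        For example (hypothetical; ``node`` is assumed to live in a
        writable file)::

            >>> node._f_setAttr('temperature', 21.5)
            >>> node._f_getAttr('temperature')
            21.5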
"""
setattr(self._v_attrs, name, value)
def _f_delAttr(self, name):
"""Delete a PyTables attribute from this node.
If the named attribute does not exist, an AttributeError is raised.
"""
delattr(self._v_attrs, name)
# </attribute handling>
class NotLoggedMixin:
# Include this class in your inheritance tree
    # to prevent changes to instances of your class from being logged.
_AttributeSet = NotLoggedAttributeSet
def _g_logCreate(self):
pass
def _g_logMove(self, oldPathname):
pass
def _g_removeAndLog(self, recursive, force):
self._g_remove(recursive, force)
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
|
buzztroll/unix-agent | refs/heads/master | src/dcm/agent/tests/utils/__init__.py | 51 | #
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
ghislainp/iris | refs/heads/master | lib/iris/tests/unit/analysis/regrid/test_RectilinearRegridder.py | 5 | # (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for :class:`iris.analysis._regrid.RectilinearRegridder`."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.analysis._regrid import RectilinearRegridder as Regridder
from iris.aux_factory import HybridHeightFactory
from iris.coord_systems import GeogCS, OSGB
from iris.coords import AuxCoord, DimCoord
from iris.cube import Cube
from iris.tests import mock
from iris.tests.stock import global_pp, lat_lon_cube, realistic_4d
RESULT_DIR = ('analysis', 'regrid')
# Convenience to access Regridder static method.
regrid = Regridder._regrid
class Test__regrid__linear(tests.IrisTest):
def setUp(self):
self.x = DimCoord(np.linspace(-2, 57, 60))
self.y = DimCoord(np.linspace(0, 49, 50))
self.xs, self.ys = np.meshgrid(self.x.points, self.y.points)
transformation = lambda x, y: x + y ** 2
# Construct a function which adds dimensions to the 2D data array
# so that we can test higher dimensional functionality.
dim_extender = lambda arr: (arr[np.newaxis, ..., np.newaxis] * [1, 2])
self.data = dim_extender(transformation(self.xs, self.ys))
target_x = np.linspace(-3, 60, 4)
target_y = np.linspace(0.5, 51, 3)
self.target_x, self.target_y = np.meshgrid(target_x, target_y)
        #: Expected values, which are not quite the analytical values, but
#: representative of the bilinear interpolation scheme.
self.expected = np.array([[[[np.nan, np.nan],
[18.5, 37.],
[39.5, 79.],
[np.nan, np.nan]],
[[np.nan, np.nan],
[681.25, 1362.5],
[702.25, 1404.5],
[np.nan, np.nan]],
[[np.nan, np.nan],
[np.nan, np.nan],
[np.nan, np.nan],
[np.nan, np.nan]]]])
self.x_dim = 2
self.y_dim = 1
def assert_values(self, values):
# values is a list of [x, y, [val1, val2]]
xs, ys, expecteds = zip(*values)
expecteds = np.array(expecteds)[None, None, ...]
result = regrid(self.data, self.x_dim, self.y_dim,
self.x, self.y,
np.array([xs]), np.array([ys]))
self.assertArrayAllClose(result, expecteds, rtol=1e-04)
# Check that transposing the input data results in the same values
ndim = self.data.ndim
result2 = regrid(self.data.T, ndim - self.x_dim - 1,
ndim - self.y_dim - 1,
self.x, self.y,
np.array([xs]), np.array([ys]))
self.assertArrayEqual(result.T, result2)
def test_single_values(self):
# Check that the values are sensible e.g. (3 + 4**2 == 19)
self.assert_values([[3, 4, [19, 38]],
[-2, 0, [-2, -4]],
[-2.01, 0, [np.nan, np.nan]],
[2, -0.01, [np.nan, np.nan]],
[57, 0, [57, 114]],
[57.01, 0, [np.nan, np.nan]],
[57, 49, [2458, 4916]],
[57, 49.01, [np.nan, np.nan]]])
def test_simple_result(self):
result = regrid(self.data, self.x_dim, self.y_dim,
self.x, self.y,
self.target_x, self.target_y)
self.assertArrayEqual(result, self.expected)
def test_simple_masked(self):
data = np.ma.MaskedArray(self.data, mask=True)
data.mask[:, 1:30, 1:30] = False
result = regrid(data, self.x_dim, self.y_dim,
self.x, self.y,
self.target_x, self.target_y)
expected_mask = np.array([[[[True, True], [True, True],
[True, True], [True, True]],
[[True, True], [False, False],
[True, True], [True, True]],
[[True, True], [True, True],
[True, True], [True, True]]]], dtype=bool)
expected = np.ma.MaskedArray(self.expected,
mask=expected_mask)
self.assertMaskedArrayEqual(result, expected)
def test_simple_masked_no_mask(self):
data = np.ma.MaskedArray(self.data, mask=False)
result = regrid(data, self.x_dim, self.y_dim,
self.x, self.y,
self.target_x, self.target_y)
self.assertIsInstance(result, np.ma.MaskedArray)
def test_result_transpose_shape(self):
ndim = self.data.ndim
result = regrid(self.data.T, ndim - self.x_dim - 1,
ndim - self.y_dim - 1, self.x, self.y,
self.target_x, self.target_y)
self.assertArrayEqual(result, self.expected.T)
def test_reverse_x_coord(self):
index = [slice(None)] * self.data.ndim
index[self.x_dim] = slice(None, None, -1)
result = regrid(self.data[index], self.x_dim,
self.y_dim, self.x[::-1], self.y,
self.target_x, self.target_y)
self.assertArrayEqual(result, self.expected)
def test_circular_x_coord(self):
# Check that interpolation of a circular src coordinate doesn't result
# in an out of bounds value.
self.x.circular = True
self.x.units = 'degree'
result = regrid(self.data, self.x_dim, self.y_dim,
self.x, self.y, np.array([[58]]),
np.array([[0]]))
self.assertArrayAlmostEqual(result,
np.array([56.80398671, 113.60797342],
ndmin=self.data.ndim))
# Check what happens to NaN values, extrapolated values, and
# masked values.
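# A summary of the modes as exercised below (a reading of the expected
# behaviour encoded in these tests, not of the implementation):
#
#   default       -> extrapolated points become NaN (plain arrays) or
#                    masked (masked arrays)
#   'extrapolate' -> extrapolated points take extrapolated values
#   'nan'         -> extrapolated points become NaN
#   'error'       -> extrapolation raises a ValueError
#   'mask'        -> extrapolated points are masked (even for ndarrays)
#   'nanmask'     -> NaN for plain arrays, masked for masked arrays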
class Test__regrid__extrapolation_modes(tests.IrisTest):
values_by_method = {'linear': [[np.nan, np.nan, 2, 3, np.nan],
[np.nan, np.nan, 6, 7, np.nan],
[8, 9, 10, 11, np.nan]],
'nearest': [[np.nan, 1, 2, 3, np.nan],
[4, 5, 6, 7, np.nan],
[8, 9, 10, 11, np.nan]]}
extrapolate_values_by_method = {'linear': [[np.nan, np.nan, 2, 3, 4],
[np.nan, np.nan, 6, 7, 8],
[8, 9, 10, 11, 12]],
'nearest': [[np.nan, 1, 2, 3, 3],
[4, 5, 6, 7, 7],
[8, 9, 10, 11, 11]]}
def setUp(self):
self.methods = ('linear', 'nearest')
def _regrid(self, data, method, extrapolation_mode=None):
x = np.arange(4)
y = np.arange(3)
x_coord = DimCoord(x)
y_coord = DimCoord(y)
x_dim, y_dim = 1, 0
grid_x, grid_y = np.meshgrid(np.arange(5), y)
kwargs = dict(method=method)
if extrapolation_mode is not None:
kwargs['extrapolation_mode'] = extrapolation_mode
result = regrid(data, x_dim, y_dim, x_coord, y_coord,
grid_x, grid_y, **kwargs)
return result
def test_default_ndarray(self):
# NaN -> NaN
# Extrapolated -> NaN
data = np.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
for method in self.methods:
result = self._regrid(data, method)
self.assertNotIsInstance(result, np.ma.MaskedArray)
expected = self.values_by_method[method]
self.assertArrayEqual(result, expected)
def test_default_maskedarray(self):
# NaN -> NaN
# Extrapolated -> Masked
# Masked -> Masked
data = np.ma.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
data[2, 3] = np.ma.masked
for method in self.methods:
result = self._regrid(data, method)
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 1, 1]]
values = self.values_by_method[method]
expected = np.ma.MaskedArray(values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_default_maskedarray_none_masked(self):
# NaN -> NaN
# Extrapolated -> Masked
# Masked -> N/A
data = np.ma.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
for method in self.methods:
result = self._regrid(data, method)
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1]]
values = self.values_by_method[method]
expected = np.ma.MaskedArray(values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_default_maskedarray_none_masked_expanded(self):
# NaN -> NaN
# Extrapolated -> Masked
# Masked -> N/A
data = np.ma.arange(12, dtype=np.float).reshape(3, 4)
# Make sure the mask has been expanded
data.mask = False
data[0, 0] = np.nan
for method in self.methods:
result = self._regrid(data, method)
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1]]
values = self.values_by_method[method]
expected = np.ma.MaskedArray(values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_method_ndarray(self):
# NaN -> NaN
# Extrapolated -> linear
data = np.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
for method in self.methods:
result = self._regrid(data, method, 'extrapolate')
self.assertNotIsInstance(result, np.ma.MaskedArray)
expected = self.extrapolate_values_by_method[method]
self.assertArrayEqual(result, expected)
def test_method_maskedarray(self):
# NaN -> NaN
# Extrapolated -> linear
# Masked -> Masked
data = np.ma.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
data[2, 3] = np.ma.masked
for method in self.methods:
result = self._regrid(data, method, 'extrapolate')
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 1]]
values = self.extrapolate_values_by_method[method]
expected = np.ma.MaskedArray(values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_nan_ndarray(self):
# NaN -> NaN
# Extrapolated -> NaN
data = np.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
for method in self.methods:
result = self._regrid(data, method, 'nan')
self.assertNotIsInstance(result, np.ma.MaskedArray)
expected = self.values_by_method[method]
self.assertArrayEqual(result, expected)
def test_nan_maskedarray(self):
# NaN -> NaN
# Extrapolated -> NaN
# Masked -> Masked
data = np.ma.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
data[2, 3] = np.ma.masked
for method in self.methods:
result = self._regrid(data, method, 'nan')
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0]]
values = self.values_by_method[method]
expected = np.ma.MaskedArray(values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_error_ndarray(self):
# Values irrelevant - the function raises an error.
data = np.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
for method in self.methods:
with self.assertRaisesRegexp(ValueError, 'out of bounds'):
self._regrid(data, method, 'error')
def test_error_maskedarray(self):
# Values irrelevant - the function raises an error.
data = np.ma.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
data[2, 3] = np.ma.masked
for method in self.methods:
with self.assertRaisesRegexp(ValueError, 'out of bounds'):
self._regrid(data, method, 'error')
def test_mask_ndarray(self):
# NaN -> NaN
# Extrapolated -> Masked (this is different from all the other
# modes)
data = np.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
for method in self.methods:
result = self._regrid(data, method, 'mask')
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1]]
values = self.values_by_method[method]
expected = np.ma.MaskedArray(values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_mask_maskedarray(self):
# NaN -> NaN
# Extrapolated -> Masked
# Masked -> Masked
data = np.ma.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
data[2, 3] = np.ma.masked
for method in self.methods:
result = self._regrid(data, method, 'mask')
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 1, 1]]
values = self.values_by_method[method]
expected = np.ma.MaskedArray(values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_nanmask_ndarray(self):
# NaN -> NaN
# Extrapolated -> NaN
data = np.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
for method in self.methods:
result = self._regrid(data, method, 'nanmask')
self.assertNotIsInstance(result, np.ma.MaskedArray)
expected = self.values_by_method[method]
self.assertArrayEqual(result, expected)
def test_nanmask_maskedarray(self):
# NaN -> NaN
# Extrapolated -> Masked
# Masked -> Masked
data = np.ma.arange(12, dtype=np.float).reshape(3, 4)
data[0, 0] = np.nan
data[2, 3] = np.ma.masked
for method in self.methods:
result = self._regrid(data, method, 'nanmask')
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 1, 1]]
values = self.values_by_method[method]
expected = np.ma.MaskedArray(values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_invalid(self):
data = np.arange(12, dtype=np.float).reshape(3, 4)
emsg = 'Invalid extrapolation mode'
for method in self.methods:
with self.assertRaisesRegexp(ValueError, emsg):
self._regrid(data, method, 'BOGUS')
class Test___call____invalid_types(tests.IrisTest):
def setUp(self):
self.cube = lat_lon_cube()
# Regridder method and extrapolation-mode.
self.args = ('linear', 'mask')
self.regridder = Regridder(self.cube, self.cube, *self.args)
def test_src_as_array(self):
arr = np.zeros((3, 4))
with self.assertRaises(TypeError):
Regridder(arr, self.cube, *self.args)
with self.assertRaises(TypeError):
self.regridder(arr)
def test_grid_as_array(self):
with self.assertRaises(TypeError):
Regridder(self.cube, np.zeros((3, 4)), *self.args)
def test_src_as_int(self):
with self.assertRaises(TypeError):
Regridder(42, self.cube, *self.args)
with self.assertRaises(TypeError):
self.regridder(42)
def test_grid_as_int(self):
with self.assertRaises(TypeError):
Regridder(self.cube, 42, *self.args)
class Test___call____missing_coords(tests.IrisTest):
def setUp(self):
self.args = ('linear', 'mask')
def ok_bad(self, coord_names):
# Deletes the named coords from `bad`.
ok = lat_lon_cube()
bad = lat_lon_cube()
for name in coord_names:
bad.remove_coord(name)
return ok, bad
def test_src_missing_lat(self):
ok, bad = self.ok_bad(['latitude'])
with self.assertRaises(ValueError):
Regridder(bad, ok, *self.args)
regridder = Regridder(ok, ok, *self.args)
with self.assertRaises(ValueError):
regridder(bad)
def test_grid_missing_lat(self):
ok, bad = self.ok_bad(['latitude'])
with self.assertRaises(ValueError):
Regridder(ok, bad, *self.args)
def test_src_missing_lon(self):
ok, bad = self.ok_bad(['longitude'])
with self.assertRaises(ValueError):
Regridder(bad, ok, *self.args)
regridder = Regridder(ok, ok, *self.args)
with self.assertRaises(ValueError):
regridder(bad)
def test_grid_missing_lon(self):
ok, bad = self.ok_bad(['longitude'])
with self.assertRaises(ValueError):
Regridder(ok, bad, *self.args)
def test_src_missing_lat_lon(self):
ok, bad = self.ok_bad(['latitude', 'longitude'])
with self.assertRaises(ValueError):
Regridder(bad, ok, *self.args)
regridder = Regridder(ok, ok, *self.args)
with self.assertRaises(ValueError):
regridder(bad)
def test_grid_missing_lat_lon(self):
ok, bad = self.ok_bad(['latitude', 'longitude'])
with self.assertRaises(ValueError):
Regridder(ok, bad, *self.args)
class Test___call____not_dim_coord(tests.IrisTest):
def setUp(self):
self.args = ('linear', 'mask')
def ok_bad(self, coord_name):
# Demotes the named DimCoord on `bad` to an AuxCoord.
ok = lat_lon_cube()
bad = lat_lon_cube()
coord = bad.coord(coord_name)
dims = bad.coord_dims(coord)
bad.remove_coord(coord_name)
aux_coord = AuxCoord.from_coord(coord)
bad.add_aux_coord(aux_coord, dims)
return ok, bad
def test_src_with_aux_lat(self):
ok, bad = self.ok_bad('latitude')
with self.assertRaises(ValueError):
Regridder(bad, ok, *self.args)
regridder = Regridder(ok, ok, *self.args)
with self.assertRaises(ValueError):
regridder(bad)
def test_grid_with_aux_lat(self):
ok, bad = self.ok_bad('latitude')
with self.assertRaises(ValueError):
Regridder(ok, bad, *self.args)
def test_src_with_aux_lon(self):
ok, bad = self.ok_bad('longitude')
with self.assertRaises(ValueError):
Regridder(bad, ok, *self.args)
regridder = Regridder(ok, ok, *self.args)
with self.assertRaises(ValueError):
regridder(bad)
def test_grid_with_aux_lon(self):
ok, bad = self.ok_bad('longitude')
with self.assertRaises(ValueError):
Regridder(ok, bad, *self.args)
class Test___call____not_dim_coord_share(tests.IrisTest):
def setUp(self):
self.args = ('linear', 'mask')
def ok_bad(self):
# Make lat/lon share a single dimension on `bad`.
ok = lat_lon_cube()
bad = lat_lon_cube()
lat = bad.coord('latitude')
bad = bad[0, :lat.shape[0]]
bad.remove_coord('latitude')
bad.add_aux_coord(lat, 0)
return ok, bad
def test_src_shares_dim(self):
ok, bad = self.ok_bad()
with self.assertRaises(ValueError):
Regridder(bad, ok, *self.args)
regridder = Regridder(ok, ok, *self.args)
with self.assertRaises(ValueError):
regridder(bad)
def test_grid_shares_dim(self):
ok, bad = self.ok_bad()
with self.assertRaises(ValueError):
Regridder(ok, bad, *self.args)
class Test___call____bad_georeference(tests.IrisTest):
def setUp(self):
self.args = ('linear', 'mask')
def ok_bad(self, lat_cs, lon_cs):
# Updates `bad` to use the given coordinate systems.
ok = lat_lon_cube()
bad = lat_lon_cube()
bad.coord('latitude').coord_system = lat_cs
bad.coord('longitude').coord_system = lon_cs
return ok, bad
def test_src_no_cs(self):
ok, bad = self.ok_bad(None, None)
regridder = Regridder(bad, ok, *self.args)
with self.assertRaises(ValueError):
regridder(bad)
def test_grid_no_cs(self):
ok, bad = self.ok_bad(None, None)
regridder = Regridder(ok, bad, *self.args)
with self.assertRaises(ValueError):
regridder(ok)
def test_src_one_cs(self):
ok, bad = self.ok_bad(None, GeogCS(6371000))
with self.assertRaises(ValueError):
Regridder(bad, ok, *self.args)
def test_grid_one_cs(self):
ok, bad = self.ok_bad(None, GeogCS(6371000))
with self.assertRaises(ValueError):
Regridder(ok, bad, *self.args)
def test_src_inconsistent_cs(self):
ok, bad = self.ok_bad(GeogCS(6370000), GeogCS(6371000))
with self.assertRaises(ValueError):
Regridder(bad, ok, *self.args)
def test_grid_inconsistent_cs(self):
ok, bad = self.ok_bad(GeogCS(6370000), GeogCS(6371000))
with self.assertRaises(ValueError):
Regridder(ok, bad, *self.args)
class Test___call____bad_angular_units(tests.IrisTest):
def ok_bad(self):
# Changes the longitude coord to radians on `bad`.
ok = lat_lon_cube()
bad = lat_lon_cube()
bad.coord('longitude').units = 'radians'
return ok, bad
def test_src_radians(self):
ok, bad = self.ok_bad()
regridder = Regridder(bad, ok, 'linear', 'mask')
with self.assertRaises(ValueError):
regridder(bad)
def test_grid_radians(self):
ok, bad = self.ok_bad()
with self.assertRaises(ValueError):
Regridder(ok, bad, 'linear', 'mask')
def uk_cube():
data = np.arange(12, dtype=np.float32).reshape(3, 4)
uk = Cube(data)
cs = OSGB()
y_coord = DimCoord(np.arange(3), 'projection_y_coordinate', units='m',
coord_system=cs)
x_coord = DimCoord(np.arange(4), 'projection_x_coordinate', units='m',
coord_system=cs)
uk.add_dim_coord(y_coord, 0)
uk.add_dim_coord(x_coord, 1)
surface = AuxCoord(data * 10, 'surface_altitude', units='m')
uk.add_aux_coord(surface, (0, 1))
uk.add_aux_factory(HybridHeightFactory(orography=surface))
return uk
class Test___call____bad_linear_units(tests.IrisTest):
def ok_bad(self):
# Defines `bad` with an x coordinate in km.
ok = lat_lon_cube()
bad = uk_cube()
bad.coord(axis='x').units = 'km'
return ok, bad
def test_src_km(self):
ok, bad = self.ok_bad()
regridder = Regridder(bad, ok, 'linear', 'mask')
with self.assertRaises(ValueError):
regridder(bad)
def test_grid_km(self):
ok, bad = self.ok_bad()
with self.assertRaises(ValueError):
Regridder(ok, bad, 'linear', 'mask')
class Test___call____no_coord_systems(tests.IrisTest):
# Test behaviour in the absence of any coordinate systems.
def setUp(self):
self.mode = 'mask'
self.methods = ('linear', 'nearest')
def remove_coord_systems(self, cube):
for coord in cube.coords():
coord.coord_system = None
def test_ok(self):
# Ensure regridding is supported when the coordinate definitions match.
        # NB. We change the coordinate *values* to ensure that this does not
# prevent the regridding operation.
src = uk_cube()
self.remove_coord_systems(src)
grid = src.copy()
for coord in grid.dim_coords:
coord.points = coord.points + 1
for method in self.methods:
regridder = Regridder(src, grid, method, self.mode)
result = regridder(src)
for coord in result.dim_coords:
self.assertEqual(coord, grid.coord(coord))
expected = np.ma.arange(12).reshape((3, 4)) + 5
expected[:, 3] = np.ma.masked
expected[2, :] = np.ma.masked
self.assertMaskedArrayEqual(result.data, expected)
def test_matching_units(self):
# Check we are insensitive to the units provided they match.
        # NB. We change the coordinate *values* to ensure that this does not
# prevent the regridding operation.
src = uk_cube()
self.remove_coord_systems(src)
# Move to unusual units (i.e. not metres or degrees).
for coord in src.dim_coords:
coord.units = 'feet'
grid = src.copy()
for coord in grid.dim_coords:
coord.points = coord.points + 1
for method in self.methods:
regridder = Regridder(src, grid, method, self.mode)
result = regridder(src)
for coord in result.dim_coords:
self.assertEqual(coord, grid.coord(coord))
expected = np.ma.arange(12).reshape((3, 4)) + 5
expected[:, 3] = np.ma.masked
expected[2, :] = np.ma.masked
self.assertMaskedArrayEqual(result.data, expected)
def test_different_units(self):
src = uk_cube()
self.remove_coord_systems(src)
# Move to unusual units (i.e. not metres or degrees).
for coord in src.coords():
coord.units = 'feet'
grid = src.copy()
grid.coord('projection_y_coordinate').units = 'yards'
        # We change the coordinate *values* to ensure that this does not
# prevent the regridding operation.
for coord in grid.dim_coords:
coord.points = coord.points + 1
for method in self.methods:
regridder = Regridder(src, grid, method, self.mode)
emsg = 'matching coordinate metadata'
with self.assertRaisesRegexp(ValueError, emsg):
regridder(src)
def test_coord_metadata_mismatch(self):
# Check for failure when coordinate definitions differ.
uk = uk_cube()
self.remove_coord_systems(uk)
lat_lon = lat_lon_cube()
self.remove_coord_systems(lat_lon)
for method in self.methods:
regridder = Regridder(uk, lat_lon, method, self.mode)
with self.assertRaises(ValueError):
regridder(uk)
class Test___call____extrapolation_modes(tests.IrisTest):
values = [[np.nan, 6, 7, np.nan],
[9, 10, 11, np.nan],
[np.nan, np.nan, np.nan, np.nan]]
extrapolate_values_by_method = {'linear': [[np.nan, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]],
'nearest': [[np.nan, 6, 7, 7],
[9, 10, 11, 11],
[9, 10, 11, 11]]}
surface_values = [[50, 60, 70, np.nan],
[90, 100, 110, np.nan],
[np.nan, np.nan, np.nan, np.nan]]
def setUp(self):
self.methods = ('linear', 'nearest')
def _ndarray_cube(self, method):
assert method in self.methods
src = uk_cube()
index = (0, 0) if method == 'linear' else (1, 1)
src.data[index] = np.nan
return src
def _masked_cube(self, method):
assert method in self.methods
src = uk_cube()
src.data = np.ma.asarray(src.data)
nan_index = (0, 0) if method == 'linear' else (1, 1)
mask_index = (2, 3)
src.data[nan_index] = np.nan
src.data[mask_index] = np.ma.masked
return src
def _regrid(self, src, method, extrapolation_mode='mask'):
grid = src.copy()
for coord in grid.dim_coords:
coord.points = coord.points + 1
regridder = Regridder(src, grid, method, extrapolation_mode)
result = regridder(src)
surface = result.coord('surface_altitude').points
self.assertNotIsInstance(surface, np.ma.MaskedArray)
self.assertArrayEqual(surface, self.surface_values)
return result.data
def test_default_ndarray(self):
# NaN -> NaN
# Extrapolated -> Masked
for method in self.methods:
src = self._ndarray_cube(method)
result = self._regrid(src, method)
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 1],
[0, 0, 0, 1],
[1, 1, 1, 1]]
expected = np.ma.MaskedArray(self.values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_default_maskedarray(self):
# NaN -> NaN
# Extrapolated -> Masked
# Masked -> Masked
for method in self.methods:
src = self._masked_cube(method)
result = self._regrid(src, method)
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 1],
[0, 0, 1, 1],
[1, 1, 1, 1]]
expected = np.ma.MaskedArray(self.values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_default_maskedarray_none_masked(self):
# NaN -> NaN
# Extrapolated -> Masked
# Masked -> N/A
for method in self.methods:
src = uk_cube()
src.data = np.ma.asarray(src.data)
index = (0, 0) if method == 'linear' else (1, 1)
src.data[index] = np.nan
result = self._regrid(src, method)
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 1],
[0, 0, 0, 1],
[1, 1, 1, 1]]
expected = np.ma.MaskedArray(self.values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_default_maskedarray_none_masked_expanded(self):
# NaN -> NaN
# Extrapolated -> Masked
# Masked -> N/A
for method in self.methods:
src = uk_cube()
src.data = np.ma.asarray(src.data)
# Make sure the mask has been expanded
src.data.mask = False
index = (0, 0) if method == 'linear' else (1, 1)
src.data[index] = np.nan
result = self._regrid(src, method)
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 1],
[0, 0, 0, 1],
[1, 1, 1, 1]]
expected = np.ma.MaskedArray(self.values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_method_ndarray(self):
# NaN -> NaN
# Extrapolated -> linear
for method in self.methods:
src = self._ndarray_cube(method)
result = self._regrid(src, method, 'extrapolate')
self.assertNotIsInstance(result, np.ma.MaskedArray)
expected = self.extrapolate_values_by_method[method]
self.assertArrayEqual(result, expected)
def test_nan_ndarray(self):
# NaN -> NaN
# Extrapolated -> NaN
for method in self.methods:
src = self._ndarray_cube(method)
result = self._regrid(src, method, 'nan')
self.assertNotIsInstance(result, np.ma.MaskedArray)
self.assertArrayEqual(result, self.values)
def test_nan_maskedarray(self):
# NaN -> NaN
# Extrapolated -> NaN
# Masked -> Masked
for method in self.methods:
src = self._masked_cube(method)
result = self._regrid(src, method, 'nan')
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]]
expected = np.ma.MaskedArray(self.values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_error_ndarray(self):
# Values irrelevant - the function raises an error.
for method in self.methods:
src = self._ndarray_cube(method)
with self.assertRaisesRegexp(ValueError, 'out of bounds'):
self._regrid(src, method, 'error')
def test_error_maskedarray(self):
# Values irrelevant - the function raises an error.
for method in self.methods:
src = self._masked_cube(method)
with self.assertRaisesRegexp(ValueError, 'out of bounds'):
self._regrid(src, method, 'error')
def test_mask_ndarray(self):
# NaN -> NaN
# Extrapolated -> Masked (this is different from all the other
# modes)
for method in self.methods:
src = self._ndarray_cube(method)
result = self._regrid(src, method, 'mask')
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 1],
[0, 0, 0, 1],
[1, 1, 1, 1]]
expected = np.ma.MaskedArray(self.values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_mask_maskedarray(self):
# NaN -> NaN
# Extrapolated -> Masked
# Masked -> Masked
for method in self.methods:
src = self._masked_cube(method)
result = self._regrid(src, method, 'mask')
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 1],
[0, 0, 1, 1],
[1, 1, 1, 1]]
expected = np.ma.MaskedArray(self.values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_nanmask_ndarray(self):
# NaN -> NaN
# Extrapolated -> NaN
for method in self.methods:
src = self._ndarray_cube(method)
result = self._regrid(src, method, 'nanmask')
self.assertNotIsInstance(result, np.ma.MaskedArray)
self.assertArrayEqual(result, self.values)
def test_nanmask_maskedarray(self):
# NaN -> NaN
# Extrapolated -> Masked
# Masked -> Masked
for method in self.methods:
src = self._masked_cube(method)
result = self._regrid(src, method, 'nanmask')
self.assertIsInstance(result, np.ma.MaskedArray)
mask = [[0, 0, 0, 1],
[0, 0, 1, 1],
[1, 1, 1, 1]]
expected = np.ma.MaskedArray(self.values, mask)
self.assertMaskedArrayEqual(result, expected)
def test_invalid(self):
src = uk_cube()
emsg = 'Invalid extrapolation mode'
for method in self.methods:
with self.assertRaisesRegexp(ValueError, emsg):
self._regrid(src, method, 'BOGUS')
@tests.skip_data
class Test___call____rotated_to_lat_lon(tests.IrisTest):
def setUp(self):
self.src = realistic_4d()[:5, :2, ::40, ::30]
self.mode = 'mask'
self.methods = ('linear', 'nearest')
def test_single_point(self):
src = self.src[0, 0]
grid = global_pp()[:1, :1]
# These coordinate values have been derived by converting the
# rotated coordinates of src[1, 1] into lat/lon by using cs2cs.
grid.coord('longitude').points = -3.144870
grid.coord('latitude').points = 52.406444
for method in self.methods:
regridder = Regridder(src, grid, method, self.mode)
result = regridder(src)
self.assertEqual(src.data[1, 1], result.data)
def test_transposed_src(self):
# The source dimensions are in a non-standard order.
src = self.src
src.transpose([3, 1, 2, 0])
grid = self._grid_subset()
for method in self.methods:
regridder = Regridder(src, grid, method, self.mode)
result = regridder(src)
result.transpose([3, 1, 2, 0])
cml = RESULT_DIR + ('{}_subset.cml'.format(method),)
self.assertCMLApproxData(result, cml)
def _grid_subset(self):
# The destination grid points are entirely contained within the
# src grid points.
grid = global_pp()[:4, :5]
grid.coord('longitude').points = np.linspace(-3.182, -3.06, 5)
grid.coord('latitude').points = np.linspace(52.372, 52.44, 4)
return grid
def test_reversed(self):
src = self.src
grid = self._grid_subset()
for method in self.methods:
cml = RESULT_DIR + ('{}_subset.cml'.format(method),)
regridder = Regridder(src, grid[::-1], method, self.mode)
result = regridder(src)
self.assertCMLApproxData(result[:, :, ::-1], cml)
sample = src[:, :, ::-1]
regridder = Regridder(sample, grid[::-1], method, self.mode)
result = regridder(sample)
self.assertCMLApproxData(result[:, :, ::-1], cml)
sample = src[:, :, :, ::-1]
regridder = Regridder(sample, grid[::-1], method, self.mode)
result = regridder(sample)
self.assertCMLApproxData(result[:, :, ::-1], cml)
sample = src[:, :, ::-1, ::-1]
regridder = Regridder(sample, grid[::-1], method, self.mode)
result = regridder(sample)
self.assertCMLApproxData(result[:, :, ::-1], cml)
regridder = Regridder(src, grid[:, ::-1], method, self.mode)
result = regridder(src)
self.assertCMLApproxData(result[:, :, :, ::-1], cml)
sample = src[:, :, ::-1]
regridder = Regridder(sample, grid[:, ::-1], method, self.mode)
result = regridder(sample)
self.assertCMLApproxData(result[:, :, :, ::-1], cml)
sample = src[:, :, :, ::-1]
regridder = Regridder(sample, grid[:, ::-1], method, self.mode)
result = regridder(sample)
self.assertCMLApproxData(result[:, :, :, ::-1], cml)
sample = src[:, :, ::-1, ::-1]
regridder = Regridder(sample, grid[:, ::-1], method, self.mode)
result = regridder(sample)
self.assertCMLApproxData(result[:, :, :, ::-1], cml)
regridder = Regridder(src, grid[::-1, ::-1], method, self.mode)
result = regridder(src)
self.assertCMLApproxData(result[:, :, ::-1, ::-1], cml)
sample = src[:, :, ::-1]
regridder = Regridder(sample, grid[::-1, ::-1], method, self.mode)
result = regridder(sample)
self.assertCMLApproxData(result[:, :, ::-1, ::-1], cml)
sample = src[:, :, :, ::-1]
regridder = Regridder(sample, grid[::-1, ::-1], method, self.mode)
result = regridder(sample)
self.assertCMLApproxData(result[:, :, ::-1, ::-1], cml)
sample = src[:, :, ::-1, ::-1]
regridder = Regridder(sample, grid[::-1, ::-1], method, self.mode)
result = regridder(sample)
self.assertCMLApproxData(result[:, :, ::-1, ::-1], cml)
def test_grid_subset(self):
# The destination grid points are entirely contained within the
# src grid points.
grid = self._grid_subset()
for method in self.methods:
regridder = Regridder(self.src, grid, method, self.mode)
result = regridder(self.src)
cml = RESULT_DIR + ('{}_subset.cml'.format(method),)
self.assertCMLApproxData(result, cml)
def _big_grid(self):
grid = self._grid_subset()
big_grid = Cube(np.zeros((5, 10, 3, 4, 5)))
big_grid.add_dim_coord(grid.coord('latitude'), 3)
big_grid.add_dim_coord(grid.coord('longitude'), 4)
return big_grid
def test_grid_subset_big(self):
# Add some extra dimensions to the destination Cube and
# these should be safely ignored.
big_grid = self._big_grid()
for method in self.methods:
regridder = Regridder(self.src, big_grid, method, self.mode)
result = regridder(self.src)
cml = RESULT_DIR + ('{}_subset.cml'.format(method),)
self.assertCMLApproxData(result, cml)
def test_grid_subset_big_transposed(self):
# The order of the grid's dimensions (including the X and Y
# dimensions) must not affect the result.
big_grid = self._big_grid()
big_grid.transpose([4, 0, 3, 1, 2])
for method in self.methods:
regridder = Regridder(self.src, big_grid, method, self.mode)
result = regridder(self.src)
cml = RESULT_DIR + ('{}_subset.cml'.format(method),)
self.assertCMLApproxData(result, cml)
def test_grid_subset_anon(self):
# Must cope OK with anonymous source dimensions.
src = self.src
src.remove_coord('time')
grid = self._grid_subset()
for method in self.methods:
regridder = Regridder(src, grid, method, self.mode)
result = regridder(src)
cml = RESULT_DIR + ('{}_subset_anon.cml'.format(method),)
self.assertCMLApproxData(result, cml)
def test_grid_subset_missing_data_1(self):
# The destination grid points are entirely contained within the
# src grid points AND we have missing data.
src = self.src
src.data = np.ma.MaskedArray(src.data)
src.data[:, :, 0, 0] = np.ma.masked
grid = self._grid_subset()
for method in self.methods:
regridder = Regridder(src, grid, method, self.mode)
result = regridder(src)
cml = RESULT_DIR + ('{}_subset_masked_1.cml'.format(method),)
self.assertCMLApproxData(result, cml)
def test_grid_subset_missing_data_2(self):
# The destination grid points are entirely contained within the
# src grid points AND we have missing data.
src = self.src
src.data = np.ma.MaskedArray(src.data)
src.data[:, :, 1, 2] = np.ma.masked
grid = self._grid_subset()
for method in self.methods:
regridder = Regridder(src, grid, method, self.mode)
result = regridder(src)
cml = RESULT_DIR + ('{}_subset_masked_2.cml'.format(method),)
self.assertCMLApproxData(result, cml)
def test_grid_partial_overlap(self):
# The destination grid points are partially contained within the
# src grid points.
grid = global_pp()[:4, :4]
grid.coord('longitude').points = np.linspace(-3.3, -3.06, 4)
grid.coord('latitude').points = np.linspace(52.377, 52.43, 4)
for method in self.methods:
regridder = Regridder(self.src, grid, method, self.mode)
result = regridder(self.src)
cml = RESULT_DIR + ('{}_partial_overlap.cml'.format(method),)
self.assertCMLApproxData(result, cml)
def test_grid_no_overlap(self):
# The destination grid points are NOT contained within the
# src grid points.
grid = global_pp()[:4, :4]
grid.coord('longitude').points = np.linspace(-3.3, -3.2, 4)
grid.coord('latitude').points = np.linspace(52.377, 52.43, 4)
for method in self.methods:
regridder = Regridder(self.src, grid, method, self.mode)
result = regridder(self.src)
self.assertCMLApproxData(result, RESULT_DIR + ('no_overlap.cml',))
def test_grid_subset_missing_data_aux(self):
# The destination grid points are entirely contained within the
# src grid points AND we have missing data on the aux coordinate.
src = self.src
src.coord('surface_altitude').points[1, 2] = np.ma.masked
grid = self._grid_subset()
for method in self.methods:
regridder = Regridder(src, grid, method, self.mode)
result = regridder(src)
cml = RESULT_DIR + ('{}_masked_altitude.cml'.format(method),)
self.assertCMLApproxData(result, cml)
class Test___call____NOP(tests.IrisTest):
def setUp(self):
# The destination grid points are exactly the same as the
# src grid points.
self.src = realistic_4d()[:5, :2, ::40, ::30]
self.grid = self.src.copy()
def test_nop__linear(self):
regridder = Regridder(self.src, self.grid, 'linear', 'mask')
result = regridder(self.src)
self.assertEqual(result, self.src)
def test_nop__nearest(self):
regridder = Regridder(self.src, self.grid, 'nearest', 'mask')
result = regridder(self.src)
self.assertEqual(result, self.src)
@tests.skip_data
class Test___call____circular(tests.IrisTest):
def setUp(self):
src = global_pp()[::10, ::10]
level_height = AuxCoord(0, long_name='level_height', units='m',
attributes={'positive': 'up'})
sigma = AuxCoord(1, long_name='sigma')
surface_altitude = AuxCoord((src.data - src.data.min()) * 50,
'surface_altitude', units='m')
src.add_aux_coord(level_height)
src.add_aux_coord(sigma)
src.add_aux_coord(surface_altitude, [0, 1])
hybrid_height = HybridHeightFactory(level_height, sigma,
surface_altitude)
src.add_aux_factory(hybrid_height)
self.src = src
grid = global_pp()[:4, :4]
grid.coord('longitude').points = grid.coord('longitude').points - 5
self.grid = grid
self.mode = 'mask'
self.methods = ('linear', 'nearest')
def test_non_circular(self):
# Non-circular src -> non-circular grid
for method in self.methods:
regridder = Regridder(self.src, self.grid, method, self.mode)
result = regridder(self.src)
self.assertFalse(result.coord('longitude').circular)
cml = RESULT_DIR + ('{}_non_circular.cml'.format(method),)
self.assertCMLApproxData(result, cml)
def test_circular_src(self):
# Circular src -> non-circular grid
src = self.src
src.coord('longitude').circular = True
for method in self.methods:
regridder = Regridder(src, self.grid, method, self.mode)
result = regridder(src)
self.assertFalse(result.coord('longitude').circular)
cml = RESULT_DIR + ('{}_circular_src.cml'.format(method),)
self.assertCMLApproxData(result, cml)
def test_circular_grid(self):
# Non-circular src -> circular grid
grid = self.grid
grid.coord('longitude').circular = True
for method in self.methods:
regridder = Regridder(self.src, grid, method, self.mode)
result = regridder(self.src)
self.assertTrue(result.coord('longitude').circular)
cml = RESULT_DIR + ('{}_circular_grid.cml'.format(method),)
self.assertCMLApproxData(result, cml)
def test_circular_src_and_grid(self):
# Circular src -> circular grid
src = self.src
src.coord('longitude').circular = True
grid = self.grid
grid.coord('longitude').circular = True
for method in self.methods:
regridder = Regridder(src, grid, method, self.mode)
result = regridder(src)
self.assertTrue(result.coord('longitude').circular)
cml = RESULT_DIR + ('{}_both_circular.cml'.format(method),)
self.assertCMLApproxData(result, cml)
if __name__ == '__main__':
tests.main()
|
briney/abstar | refs/heads/master | docs/source/conf.py | 1 | # -*- coding: utf-8 -*-
#
# abstar documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 11 12:45:25 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx_rtd_theme
# from abstar.version import __version__
if sys.version_info[0] > 2:
from unittest.mock import MagicMock
else:
from mock import MagicMock
if os.environ.get('READTHEDOCS', None) == 'True':
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['pygtk', 'gtk', 'gobject', 'argparse', 'numpy', 'nwalign', 'pandas', 'abutils', 'dask', 'dask.dataframe',
'abutils.utils', 'abutils.core', 'abutils.core.sequence', 'abutils.utils.log', 'abutils.utils.alignment',
'abutils.utils.codons', 'abutils.utils.pipeline', 'abutils.utils.decorators', 'abutils.utils.progbar',
'biopython', 'celery', 'pymongo', 'scikit-bio', 'BaseSpacePy', 'BaseSpacePy.api',
'BaseSpacePy.model', 'BaseSpacePy.api.BaseSpaceAPI', 'BaseSpacePy.model.QueryParameters',
'Bio', 'Bio.Align', 'Bio.Alphabet', 'Bio.SeqIO', 'Bio.Seq', 'Bio.SeqRecord',
'Bio.Blast', 'Bio.Blast.Applications']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'abstar'
copyright = u'2018, Bryan Briney'
author = u'Bryan Briney'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3.4'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'abstardoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'abstar.tex', u'abstar Documentation',
u'Bryan Briney', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'abstar', u'abstar Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'abstar', u'abstar Documentation',
author, 'abstar', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autoclass_content = 'both'
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members']
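# Hedged note: on Sphinx >= 1.8, autodoc_default_flags is deprecated in
# favour of the dict-based autodoc_default_options; the equivalent setting
# would be (sketch only, assuming Sphinx >= 1.8):
# autodoc_default_options = {'members': True}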
|
pakodekker/oceansar | refs/heads/master | oceansar/nrcs/__init__.py | 1 | """
=========================================
NRCS Models Module (:mod:`osiris.nrcs`)
=========================================
This module includes a number of backscattering models.
.. currentmodule:: osiris.nrcs
RCS Models
Available models
----------------
.. toctree::
:maxdepth: 1
.. automodule:: osiris.nrcs.ka
:members:
.. automodule:: osiris.nrcs.kodis
:members:
.. automodule:: osiris.nrcs.romeiser97
:members:
"""
from .ka import RCSKA
from .kodis import RCSKodis
from .romeiser97 import RCSRomeiser97
models = {'ka': RCSKA,
'kodis': RCSKodis,
'romeiser97': RCSRomeiser97} |
gnome-prototypes-team/gnome-music | refs/heads/master | gnomemusic/toolbar.py | 1 | # Copyright (c) 2013 Vadim Rutkovsky <[email protected]>
# Copyright (c) 2013 Arnel A. Borja <[email protected]>
# Copyright (c) 2013 Eslam Mostafa <[email protected]>
# Copyright (c) 2013 Sai Suman Prayaga <[email protected]>
# Copyright (c) 2013 Seif Lotfy <[email protected]>
# Copyright (c) 2013 Guillaume Quintard <[email protected]>
#
# GNOME Music is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# GNOME Music is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with GNOME Music; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The GNOME Music authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and GNOME Music. This permission is above and beyond the permissions
# granted by the GPL license by which GNOME Music is covered. If you
# modify this code, you may extend this exception to your version of the
# code, but you are not obligated to do so. If you do not wish to do so,
# delete this exception statement from your version.
from gi.repository import Gtk, GObject
from gnomemusic.searchbar import Searchbar, DropDown
from gnomemusic import log
import logging
logger = logging.getLogger(__name__)
class ToolbarState:
MAIN = 0
CHILD_VIEW = 1
SEARCH_VIEW = 2
class Toolbar(GObject.GObject):
__gsignals__ = {
'state-changed': (GObject.SIGNAL_RUN_FIRST, None, ()),
'selection-mode-changed': (GObject.SIGNAL_RUN_FIRST, None, ()),
}
_selectionMode = False
@log
def __init__(self):
GObject.GObject.__init__(self)
self._stack_switcher = Gtk.StackSwitcher(
margin_top=2, margin_bottom=2, can_focus=False, halign="center")
self._stack_switcher.show()
self._ui = Gtk.Builder()
self._ui.add_from_resource('/org/gnome/Music/headerbar.ui')
self.header_bar = self._ui.get_object('header-bar')
self._search_button = self._ui.get_object('search-button')
self.dropdown = DropDown()
self.searchbar = Searchbar(self._stack_switcher, self._search_button, self.dropdown)
self.dropdown.initialize_filters(self.searchbar)
self._select_button = self._ui.get_object('select-button')
self._cancel_button = self._ui.get_object('done-button')
self._back_button = self._ui.get_object('back-button')
self._selection_menu = self._ui.get_object('selection-menu')
self._selection_menu_button = self._ui.get_object('selection-menu-button')
self._selection_menu_label = self._ui.get_object('selection-menu-button-label')
self._selection_menu_button.set_relief(Gtk.ReliefStyle.NONE)
if Gtk.get_minor_version() >= 11:
self.header_bar.remove(self._select_button)
self.header_bar.remove(self._cancel_button)
self.header_bar.remove(self._search_button)
self.header_bar.pack_end(self._select_button)
self.header_bar.pack_end(self._cancel_button)
self.header_bar.pack_end(self._search_button)
self._back_button.connect('clicked', self.on_back_button_clicked)
@log
def reset_header_title(self):
self.header_bar.set_custom_title(self._stack_switcher)
@log
def set_stack(self, stack):
self._stack_switcher.set_stack(stack)
@log
def get_stack(self):
return self._stack_switcher.get_stack()
@log
def hide_stack(self):
self._stack_switcher.hide()
@log
def show_stack(self):
self._stack_switcher.show()
@log
def set_selection_mode(self, selectionMode):
self._selectionMode = selectionMode
if selectionMode:
self._select_button.hide()
self._cancel_button.show()
self.header_bar.get_style_context().add_class('selection-mode')
self._cancel_button.get_style_context().remove_class('selection-mode')
else:
self.header_bar.get_style_context().remove_class('selection-mode')
self._select_button.set_active(False)
self._select_button.show()
self._cancel_button.hide()
self.emit('selection-mode-changed')
self._update()
@log
def on_back_button_clicked(self, widget):
view = self._stack_switcher.get_stack().get_visible_child()
view._back_button_clicked(view)
self.set_state(ToolbarState.MAIN)
@log
def set_state(self, state, btn=None):
self._state = state
self._update()
self.emit('state-changed')
@log
def _update(self):
if self._selectionMode:
self.header_bar.set_custom_title(self._selection_menu_button)
elif self._state != ToolbarState.MAIN:
self.header_bar.set_custom_title(None)
else:
self.reset_header_title()
self._search_button.set_visible(self._state != ToolbarState.SEARCH_VIEW)
self._back_button.set_visible(not self._selectionMode and self._state != ToolbarState.MAIN)
self.header_bar.set_show_close_button(not self._selectionMode)
|
kaizu/nurgle | refs/heads/master | gen/ecocyc.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# import csv
# import copy
# import os.path
# import itertools
import logging
log_ = logging.getLogger(__name__)
from enum import Enum, auto
# from . import utils
from . import _ecocyc
__program__ = 'ecocyc.py'
__version__ = '0.1'
__author__ = 'Kazunari Kaizu'
ECOCYC_LOADED = False
def load(*args, **kwargs):
global ECOCYC_LOADED
if not ECOCYC_LOADED:
_ecocyc.load(*args, **kwargs)
ECOCYC_LOADED = True
def find_entry(root, entry_id):
entries = []
for r in root:
if r['UNIQUE-ID'] == entry_id:
entries.append(r)
if len(entries) == 0:
# log_.warn('[{}] could not be found.'.format(entry_id))
return None
elif len(entries) > 1:
# log_.warn('[{}] has multiple entries.'.format(entry_id))
return None
return entries[0]
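# Usage sketch (hedged): look up a single record by its unique ID once the
# tables have been loaded. 'RXN0-5055' is a hypothetical ID used purely for
# illustration; pass load() whatever arguments _ecocyc.load() expects.
#
#     load()
#     entry = find_entry(_ecocyc.ECOCYC_REACTIONS, 'RXN0-5055')
#     if entry is not None:
#         print(entry['UNIQUE-ID'])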
def find_reaction(entry_id):
return find_entry(_ecocyc.ECOCYC_REACTIONS, entry_id)
def find_enzrxn(entry_id):
return find_entry(_ecocyc.ECOCYC_ENZRXNS, entry_id)
def find_dnabindsite(entry_id):
return find_entry(_ecocyc.ECOCYC_DNABINDSITES, entry_id)
def find_promoter(entry_id):
return find_entry(_ecocyc.ECOCYC_PROMOTERS, entry_id)
def find_transunit(entry_id):
return find_entry(_ecocyc.ECOCYC_TRANSUNITS, entry_id)
def proteins():
return _ecocyc.ECOCYC_PROTEINS
def rnas():
return _ecocyc.ECOCYC_RNAS
def transunits():
return _ecocyc.ECOCYC_TRANSUNITS
def regulations():
return _ecocyc.ECOCYC_REGULATIONS
class EcocycKind(Enum):
UNKNOWN = auto()
REACTION = auto()
ENZRXN = auto()
PROTEIN = auto()
RNA = auto()
TRANSUNIT = auto()
REGULATION = auto()
def kind(entry_id):
for root, res in (
(_ecocyc.ECOCYC_REACTIONS, EcocycKind.REACTION),
(_ecocyc.ECOCYC_ENZRXNS, EcocycKind.ENZRXN),
(_ecocyc.ECOCYC_PROTEINS, EcocycKind.PROTEIN),
(_ecocyc.ECOCYC_RNAS, EcocycKind.RNA),
(_ecocyc.ECOCYC_TRANSUNITS, EcocycKind.TRANSUNIT),
(_ecocyc.ECOCYC_REGULATIONS, EcocycKind.REGULATION),
):
if find_entry(root, entry_id) is not None:
return res
return EcocycKind.UNKNOWN |
lcy-seso/models | refs/heads/develop | fluid/language_model/utils.py | 2 | import sys
import time
import numpy as np
import paddle.fluid as fluid
import paddle
def to_lodtensor(data, place):
""" convert to LODtensor """
seq_lens = [len(seq) for seq in data]
cur_len = 0
lod = [cur_len]
for l in seq_lens:
cur_len += l
lod.append(cur_len)
flattened_data = np.concatenate(data, axis=0).astype("int64")
flattened_data = flattened_data.reshape([len(flattened_data), 1])
res = fluid.LoDTensor()
res.set(flattened_data, place)
res.set_lod([lod])
return res
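# Illustrative sketch (hedged): three variable-length sequences are flattened
# into a single LoDTensor whose level-of-detail offsets are [[0, 3, 5, 6]].
# The values and the CPUPlace are assumptions made for the example.
#
#     place = fluid.CPUPlace()
#     batch = [[1, 2, 3], [4, 5], [6]]
#     tensor = to_lodtensor(batch, place)
#     # tensor.lod() == [[0, 3, 5, 6]]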
def prepare_data(batch_size,
buffer_size=1000,
word_freq_threshold=0,
enable_ce=False):
""" prepare the English Pann Treebank (PTB) data """
vocab = paddle.dataset.imikolov.build_dict(word_freq_threshold)
if enable_ce:
train_reader = paddle.batch(
paddle.dataset.imikolov.train(
vocab,
buffer_size,
data_type=paddle.dataset.imikolov.DataType.SEQ),
batch_size)
else:
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.imikolov.train(
vocab,
buffer_size,
data_type=paddle.dataset.imikolov.DataType.SEQ),
buf_size=buffer_size),
batch_size)
test_reader = paddle.batch(
paddle.dataset.imikolov.test(
vocab, buffer_size, data_type=paddle.dataset.imikolov.DataType.SEQ),
batch_size)
return vocab, train_reader, test_reader
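# Usage sketch (hedged): build the PTB readers and iterate one pass over the
# training data. The batch size is arbitrary; each batch item is expected to
# be a (source, target) pair of word-id sequences from imikolov.
#
#     vocab, train_reader, test_reader = prepare_data(batch_size=20)
#     for batch in train_reader():
#         pass  # feed `batch` to the executor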
|
Orochimarufan/youtube-dl | refs/heads/master | youtube_dl/extractor/rmcdecouverte.py | 20 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .brightcove import BrightcoveLegacyIE
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
from ..utils import smuggle_url
class RMCDecouverteIE(InfoExtractor):
_VALID_URL = r'https?://rmcdecouverte\.bfmtv\.com/(?:(?:[^/]+/)*program_(?P<id>\d+)|(?P<live_id>mediaplayer-direct))'
_TESTS = [{
'url': 'https://rmcdecouverte.bfmtv.com/wheeler-dealers-occasions-a-saisir/program_2566/',
'info_dict': {
'id': '5983675500001',
'ext': 'mp4',
'title': 'CORVETTE',
'description': 'md5:c1e8295521e45ffebf635d6a7658f506',
'uploader_id': '1969646226001',
'upload_date': '20181226',
'timestamp': 1545861635,
},
'params': {
'skip_download': True,
},
'skip': 'only available for a week',
}, {
# live, geo restricted, bypassable
'url': 'https://rmcdecouverte.bfmtv.com/mediaplayer-direct/',
'only_matching': True,
}]
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1969646226001/default_default/index.html?videoId=%s'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id') or mobj.group('live_id')
webpage = self._download_webpage(url, display_id)
brightcove_legacy_url = BrightcoveLegacyIE._extract_brightcove_url(webpage)
if brightcove_legacy_url:
brightcove_id = compat_parse_qs(compat_urlparse.urlparse(
brightcove_legacy_url).query)['@videoPlayer'][0]
else:
brightcove_id = self._search_regex(
r'data-video-id=["\'](\d+)', webpage, 'brightcove id')
return self.url_result(
smuggle_url(
self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
{'geo_countries': ['FR']}),
'BrightcoveNew', brightcove_id)
|
pombredanne/inferno | refs/heads/master | inferno/lib/__init__.py | 4 | __version__ = '0.2.39'
|
tmkasun/Knnect | refs/heads/master | map_service/lib/SpatialUtils.py | 1 | from datetime import datetime
class SpatialCons(object):
DATE_TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
DATE_FORMAT = "%Y-%m-%d"
START_TIME = "start_time"
END_TIME = "end_time"
class SpatialUtils(object):
@staticmethod
def validate_date(date):
try:
return datetime.strptime(date, SpatialCons.DATE_FORMAT)
except ValueError:
raise ValueError("Incorrect data format, should be YYYY-MM-DD")
|
fmierlo/django-default-settings | refs/heads/master | release/1.6/project/defaults.py | 9 | SECRET_KEY = '01234567890123456789012345678901234567890123456789'
|
sorenh/cc | refs/heads/master | vendor/Twisted-10.0.0/doc/core/examples/pbbenchclient.py | 23 |
from twisted.spread import pb
from twisted.internet import defer, reactor
from twisted.cred.credentials import UsernamePassword
import time
class PBBenchClient:
hostname = 'localhost'
portno = pb.portno
calledThisSecond = 0
def callLoop(self, ignored):
d1 = self.persp.callRemote("simple")
d2 = self.persp.callRemote("complexTypes")
defer.DeferredList([d1, d2]).addCallback(self.callLoop)
self.calledThisSecond += 1
thisSecond = int(time.time())
if thisSecond != self.lastSecond:
if thisSecond - self.lastSecond > 1:
print "WARNING it took more than one second"
print 'cps:', self.calledThisSecond
self.calledThisSecond = 0
self.lastSecond = thisSecond
def _cbPerspective(self, persp):
self.persp = persp
self.lastSecond = int(time.time())
self.callLoop(None)
def runTest(self):
factory = pb.PBClientFactory()
reactor.connectTCP(self.hostname, self.portno, factory)
factory.login(UsernamePassword("benchmark", "benchmark")).addCallback(self._cbPerspective)
def main():
PBBenchClient().runTest()
    from twisted.internet import reactor
    reactor.run()
if __name__ == '__main__':
main()
|
komsas/OpenUpgrade | refs/heads/master | addons/email_template/tests/__init__.py | 121 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_mail, test_ir_actions
checks = [
test_mail,
test_ir_actions,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
yrik/django-filter | refs/heads/master | django_filters/tests/test_urls.py | 4 | from django.conf.urls.defaults import *
from django_filters.tests.models import Book
urlpatterns = patterns('',
(r'^books/$', 'django_filters.views.object_filter', {'model': Book}),
)
|
scalingdata/Impala | refs/heads/rocana-master | shell/ext-py/sqlparse-0.1.14/sqlparse/utils.py | 99 | '''
Created on 17/05/2012
@author: piranna
'''
import re
try:
from collections import OrderedDict
except ImportError:
OrderedDict = None
if OrderedDict:
class Cache(OrderedDict):
"""Cache with LRU algorithm using an OrderedDict as basis
"""
def __init__(self, maxsize=100):
OrderedDict.__init__(self)
self._maxsize = maxsize
def __getitem__(self, key, *args, **kwargs):
# Get the key and remove it from the cache, or raise KeyError
value = OrderedDict.__getitem__(self, key)
del self[key]
# Insert the (key, value) pair on the front of the cache
OrderedDict.__setitem__(self, key, value)
# Return the value from the cache
return value
def __setitem__(self, key, value, *args, **kwargs):
# Key was inserted before, remove it so we put it at front later
if key in self:
del self[key]
            # Too many items in the cache, remove the least recently used
elif len(self) >= self._maxsize:
self.popitem(False)
# Insert the (key, value) pair on the front of the cache
OrderedDict.__setitem__(self, key, value, *args, **kwargs)
else:
class Cache(dict):
"""Cache that reset when gets full
"""
def __init__(self, maxsize=100):
dict.__init__(self)
self._maxsize = maxsize
def __setitem__(self, key, value, *args, **kwargs):
            # Reset the cache if we have too many cached entries and start over
if len(self) >= self._maxsize:
self.clear()
# Insert the (key, value) pair on the front of the cache
dict.__setitem__(self, key, value, *args, **kwargs)
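# Behaviour sketch (hedged), for the OrderedDict-backed variant above: reads
# refresh recency, so the least recently used key is the one evicted.
#
#     cache = Cache(maxsize=2)
#     cache['a'] = 1
#     cache['b'] = 2
#     cache['a']       # touching 'a' makes it the most recently used
#     cache['c'] = 3   # evicts 'b', the least recently used entry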
def memoize_generator(func):
"""Memoize decorator for generators
Store `func` results in a cache according to their arguments as 'memoize'
    does, but this works on generators instead of regular functions.
    Obviously, this is only useful if the generator will always return the
    same values for each specific set of parameters...
"""
cache = Cache()
def wrapped_func(*args, **kwargs):
# params = (args, kwargs)
params = (args, tuple(sorted(kwargs.items())))
# Look if cached
try:
cached = cache[params]
# Not cached, exec and store it
except KeyError:
cached = []
for item in func(*args, **kwargs):
cached.append(item)
yield item
cache[params] = cached
# Cached, yield its items
else:
for item in cached:
yield item
return wrapped_func
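# Usage sketch (hedged): the decorated generator runs only once per distinct
# argument tuple; subsequent calls replay the cached items.
#
#     @memoize_generator
#     def squares(n):
#         for i in range(n):
#             yield i * i
#
#     list(squares(3))  # computes and caches [0, 1, 4]
#     list(squares(3))  # replayed from the cache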
# This regular expression replaces the home-cooked parser that was here before.
# It is much faster, but requires an extra post-processing step to get the
# desired results (that are compatible with what you would expect from the
# str.splitlines() method).
#
# It matches groups of characters: newlines, quoted strings, or unquoted text,
# and splits on that basis. The post-processing step puts those back together
# into the actual lines of SQL.
SPLIT_REGEX = re.compile(r"""
(
(?: # Start of non-capturing group
(?:\r\n|\r|\n) | # Match any single newline, or
[^\r\n'"]+ | # Match any character series without quotes or
# newlines, or
"(?:[^"\\]|\\.)*" | # Match double-quoted strings, or
'(?:[^'\\]|\\.)*' # Match single quoted strings
)
)
""", re.VERBOSE)
LINE_MATCH = re.compile(r'(\r\n|\r|\n)')
def split_unquoted_newlines(text):
"""Split a string on all unquoted newlines.
    Unlike str.splitlines(), this will not split on CR/LF/CR+LF when the
    newline character is inside a quoted string."""
lines = SPLIT_REGEX.split(text)
outputlines = ['']
for line in lines:
if not line:
continue
elif LINE_MATCH.match(line):
outputlines.append('')
else:
outputlines[-1] += line
return outputlines |
dalegregory/odoo | refs/heads/8.0 | addons/l10n_be_coda/wizard/__init__.py | 439 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_coda_import
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sergei-maertens/django | refs/heads/master | tests/migrations/migrations_test_apps/lookuperror_a/migrations/0001_initial.py | 381 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='A1',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
],
),
]
|
b0ttl3z/SickRage | refs/heads/master | lib/unidecode/x07f.py | 252 | data = (
'Zhui ', # 0x00
'Zi ', # 0x01
'Ke ', # 0x02
'Xiang ', # 0x03
'Jian ', # 0x04
'Mian ', # 0x05
'Lan ', # 0x06
'Ti ', # 0x07
'Miao ', # 0x08
'Qi ', # 0x09
'Yun ', # 0x0a
'Hui ', # 0x0b
'Si ', # 0x0c
'Duo ', # 0x0d
'Duan ', # 0x0e
'Bian ', # 0x0f
'Xian ', # 0x10
'Gou ', # 0x11
'Zhui ', # 0x12
'Huan ', # 0x13
'Di ', # 0x14
'Lu ', # 0x15
'Bian ', # 0x16
'Min ', # 0x17
'Yuan ', # 0x18
'Jin ', # 0x19
'Fu ', # 0x1a
'Ru ', # 0x1b
'Zhen ', # 0x1c
'Feng ', # 0x1d
'Shuai ', # 0x1e
'Gao ', # 0x1f
'Chan ', # 0x20
'Li ', # 0x21
'Yi ', # 0x22
'Jian ', # 0x23
'Bin ', # 0x24
'Piao ', # 0x25
'Man ', # 0x26
'Lei ', # 0x27
'Ying ', # 0x28
'Suo ', # 0x29
'Mou ', # 0x2a
'Sao ', # 0x2b
'Xie ', # 0x2c
'Liao ', # 0x2d
'Shan ', # 0x2e
'Zeng ', # 0x2f
'Jiang ', # 0x30
'Qian ', # 0x31
'Zao ', # 0x32
'Huan ', # 0x33
'Jiao ', # 0x34
'Zuan ', # 0x35
'Fou ', # 0x36
'Xie ', # 0x37
'Gang ', # 0x38
'Fou ', # 0x39
'Que ', # 0x3a
'Fou ', # 0x3b
'Kaakeru ', # 0x3c
'Bo ', # 0x3d
'Ping ', # 0x3e
'Hou ', # 0x3f
'[?] ', # 0x40
'Gang ', # 0x41
'Ying ', # 0x42
'Ying ', # 0x43
'Qing ', # 0x44
'Xia ', # 0x45
'Guan ', # 0x46
'Zun ', # 0x47
'Tan ', # 0x48
'Chang ', # 0x49
'Qi ', # 0x4a
'Weng ', # 0x4b
'Ying ', # 0x4c
'Lei ', # 0x4d
'Tan ', # 0x4e
'Lu ', # 0x4f
'Guan ', # 0x50
'Wang ', # 0x51
'Wang ', # 0x52
'Gang ', # 0x53
'Wang ', # 0x54
'Han ', # 0x55
'[?] ', # 0x56
'Luo ', # 0x57
'Fu ', # 0x58
'Mi ', # 0x59
'Fa ', # 0x5a
'Gu ', # 0x5b
'Zhu ', # 0x5c
'Ju ', # 0x5d
'Mao ', # 0x5e
'Gu ', # 0x5f
'Min ', # 0x60
'Gang ', # 0x61
'Ba ', # 0x62
'Gua ', # 0x63
'Ti ', # 0x64
'Juan ', # 0x65
'Fu ', # 0x66
'Lin ', # 0x67
'Yan ', # 0x68
'Zhao ', # 0x69
'Zui ', # 0x6a
'Gua ', # 0x6b
'Zhuo ', # 0x6c
'Yu ', # 0x6d
'Zhi ', # 0x6e
'An ', # 0x6f
'Fa ', # 0x70
'Nan ', # 0x71
'Shu ', # 0x72
'Si ', # 0x73
'Pi ', # 0x74
'Ma ', # 0x75
'Liu ', # 0x76
'Ba ', # 0x77
'Fa ', # 0x78
'Li ', # 0x79
'Chao ', # 0x7a
'Wei ', # 0x7b
'Bi ', # 0x7c
'Ji ', # 0x7d
'Zeng ', # 0x7e
'Tong ', # 0x7f
'Liu ', # 0x80
'Ji ', # 0x81
'Juan ', # 0x82
'Mi ', # 0x83
'Zhao ', # 0x84
'Luo ', # 0x85
'Pi ', # 0x86
'Ji ', # 0x87
'Ji ', # 0x88
'Luan ', # 0x89
'Yang ', # 0x8a
'Mie ', # 0x8b
'Qiang ', # 0x8c
'Ta ', # 0x8d
'Mei ', # 0x8e
'Yang ', # 0x8f
'You ', # 0x90
'You ', # 0x91
'Fen ', # 0x92
'Ba ', # 0x93
'Gao ', # 0x94
'Yang ', # 0x95
'Gu ', # 0x96
'Qiang ', # 0x97
'Zang ', # 0x98
'Gao ', # 0x99
'Ling ', # 0x9a
'Yi ', # 0x9b
'Zhu ', # 0x9c
'Di ', # 0x9d
'Xiu ', # 0x9e
'Qian ', # 0x9f
'Yi ', # 0xa0
'Xian ', # 0xa1
'Rong ', # 0xa2
'Qun ', # 0xa3
'Qun ', # 0xa4
'Qian ', # 0xa5
'Huan ', # 0xa6
'Zui ', # 0xa7
'Xian ', # 0xa8
'Yi ', # 0xa9
'Yashinau ', # 0xaa
'Qiang ', # 0xab
'Xian ', # 0xac
'Yu ', # 0xad
'Geng ', # 0xae
'Jie ', # 0xaf
'Tang ', # 0xb0
'Yuan ', # 0xb1
'Xi ', # 0xb2
'Fan ', # 0xb3
'Shan ', # 0xb4
'Fen ', # 0xb5
'Shan ', # 0xb6
'Lian ', # 0xb7
'Lei ', # 0xb8
'Geng ', # 0xb9
'Nou ', # 0xba
'Qiang ', # 0xbb
'Chan ', # 0xbc
'Yu ', # 0xbd
'Gong ', # 0xbe
'Yi ', # 0xbf
'Chong ', # 0xc0
'Weng ', # 0xc1
'Fen ', # 0xc2
'Hong ', # 0xc3
'Chi ', # 0xc4
'Chi ', # 0xc5
'Cui ', # 0xc6
'Fu ', # 0xc7
'Xia ', # 0xc8
'Pen ', # 0xc9
'Yi ', # 0xca
'La ', # 0xcb
'Yi ', # 0xcc
'Pi ', # 0xcd
'Ling ', # 0xce
'Liu ', # 0xcf
'Zhi ', # 0xd0
'Qu ', # 0xd1
'Xi ', # 0xd2
'Xie ', # 0xd3
'Xiang ', # 0xd4
'Xi ', # 0xd5
'Xi ', # 0xd6
'Qi ', # 0xd7
'Qiao ', # 0xd8
'Hui ', # 0xd9
'Hui ', # 0xda
'Xiao ', # 0xdb
'Se ', # 0xdc
'Hong ', # 0xdd
'Jiang ', # 0xde
'Di ', # 0xdf
'Cui ', # 0xe0
'Fei ', # 0xe1
'Tao ', # 0xe2
'Sha ', # 0xe3
'Chi ', # 0xe4
'Zhu ', # 0xe5
'Jian ', # 0xe6
'Xuan ', # 0xe7
'Shi ', # 0xe8
'Pian ', # 0xe9
'Zong ', # 0xea
'Wan ', # 0xeb
'Hui ', # 0xec
'Hou ', # 0xed
'He ', # 0xee
'He ', # 0xef
'Han ', # 0xf0
'Ao ', # 0xf1
'Piao ', # 0xf2
'Yi ', # 0xf3
'Lian ', # 0xf4
'Qu ', # 0xf5
'[?] ', # 0xf6
'Lin ', # 0xf7
'Pen ', # 0xf8
'Qiao ', # 0xf9
'Ao ', # 0xfa
'Fan ', # 0xfb
'Yi ', # 0xfc
'Hui ', # 0xfd
'Xuan ', # 0xfe
'Dao ', # 0xff
)
|