prompt (large_string, length 70–991k) | completion (large_string, length 0–1.02k)
---|---
<|file_name|>envvar.go<|end_file_name|><|fim▁begin|>/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// EnvVarApplyConfiguration represents a declarative configuration of the EnvVar type for use
// with apply.
type EnvVarApplyConfiguration struct {
Name *string `json:"name,omitempty"`
Value *string `json:"value,omitempty"`
ValueFrom *EnvVarSourceApplyConfiguration `json:"valueFrom,omitempty"`
}
// EnvVarApplyConfiguration constructs a declarative configuration of the EnvVar type for use with
// apply.
func EnvVar() *EnvVarApplyConfiguration {<|fim▁hole|>// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *EnvVarApplyConfiguration) WithName(value string) *EnvVarApplyConfiguration {
b.Name = &value
return b
}
// WithValue sets the Value field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Value field is set to the value of the last call.
func (b *EnvVarApplyConfiguration) WithValue(value string) *EnvVarApplyConfiguration {
b.Value = &value
return b
}
// WithValueFrom sets the ValueFrom field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the ValueFrom field is set to the value of the last call.
func (b *EnvVarApplyConfiguration) WithValueFrom(value *EnvVarSourceApplyConfiguration) *EnvVarApplyConfiguration {
b.ValueFrom = value
return b
}<|fim▁end|> | return &EnvVarApplyConfiguration{}
}
// WithName sets the Name field in the declarative configuration to the given value |
<|file_name|>preprocess.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
import re, csv, sys
from urlparse import urlparse
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.text import TextCollection
#process command line arguments
if len(sys.argv) < 2:
print "ERROR: arg1: must specify the input file"
print " arg2: specify -t to generate test ARFF"
sys.exit(1)
test = False
if len(sys.argv) > 2:
test = (sys.argv[2] == '-t')
# initialize some variables
stoplist = stopwords.words('english')
stoplist.extend(['.', ',', ':', '?', '!', ';', '"', "'", '-', '--', '(', ')', '/', '\\',
'[', ']', '{', '}', '|', '+', '*', '^'])
emots_pos = [':)', ':D', ':-)', ':-D', '=)', '=D', ':]', ':-]', '=]', 'X)', 'XD', 'X]',
'X-)', 'X-D', 'X-]', 'C:', ';)', ';D', ';]', ';-)', ';-D', ';-]', '<3',
':P', ':-P', '=P', 'XP', 'X-P', ':o)', ':3', ':>', '8)', ':^)', '8-D', '8D',
'=3', 'B^D', '\\o/', '<:', '(:', '(-:', '(=', '[:', '[-:', '[=', '(X', '[X',
'(-X', '[-X', ':\')', ':\'-)', ':\']', ':\'-]', '=\')', '=\']', ';^)',
'>:P', ':-b', ':b']
emots_pos = [emot.lower() for emot in emots_pos]
emots_neg = [':(', ':[', ':-(', ':-[', 'D:', '=(', '=[', 'D=', 'DX', ':C', '</3',
'>:[', ':-c', ':-<', ':<', '>:', ':{', ':\'-(', ':\'(', ':\'[', '=\'(',
'=\'[', 'D;', 'D\':', 'D:<', 'D8', 'D-\':', '):', ']:', ')-:', ']-:',
')=', ']=', ']:<', '>-:']
<|fim▁hole|>gaz_neg = []
tweets = []
sentiments = []
emots_count = []
punct_count = []
gaz_count = []
words = [] #will contain all non-stop words that occur >1 times
words1 = [] #will contain all non-stop words that occur 1 time
# generate the gazetteers
gaz_file = open('positive-words.txt', 'r')
for line in gaz_file:
line = line.strip()
if line != '' and line[0] != ';':
gaz_pos.append(line)
gaz_file.close()
gaz_file = open('negative-words.txt', 'r')
for line in gaz_file:
line = line.strip()
if line != '' and line[0] != ';':
gaz_neg.append(line)
gaz_file.close()
# print some information
print 'Number of positive emoticons: ' + str(len(emots_pos))
print 'Number of negative emoticons: ' + str(len(emots_neg))
print '\nNumber of positive gazetteer words: ' + str(len(gaz_pos))
print 'Number of negative gazetteer words: ' + str(len(gaz_neg))
# extract all tweets and words (IN TRAINING)
words_file = []
if not test:
words_file = open('words-list.txt', 'w') # COMMENT OUT FOR TESTING
tweet_file = open(sys.argv[1], 'rb')
reader = csv.reader(tweet_file, delimiter=',', quotechar='"', escapechar='\\', quoting=csv.QUOTE_ALL)
for line in reader:
# save tweet data
tweet = line[4].lower()
sent = line[1]
# REMOVE THIS SECTION FOR TESTING
if not test:
if sent == 'positive':
sent = 'POS'
elif sent == 'negative':
sent = 'NEG'
else:
sent = 'OTHER'
sentiments.append(sent)
# standardize URLs
w = tweet.split()
for i in range(len(w)):
r = urlparse(w[i])
if r[0] != '' and r[1] != '':
w[i] = 'URL'
tweet = ' '.join(w)
tweets.append(tweet)
# count emoticons
count_pos = 0
for emot in emots_pos:
count_pos += tweet.count(emot)
count_neg = 0
for emot in emots_neg:
count_neg += tweet.count(emot)
emots_count.append( (count_pos, count_neg) )
# count punctuation
punct_count.append( (tweet.count('?'), tweet.count('!')) )
# count gazetteer words
count_pos = 0
for gw in gaz_pos:
count_pos += tweet.count(gw)
count_neg = 0
for gw in gaz_neg:
count_neg += tweet.count(gw)
gaz_count.append( (count_pos, count_neg) )
# USE THIS SECTION FOR TRAINING
# extract only words used >1 times, and ignore stopwords
if not test :
tweet_sents = sent_tokenize(tweet)
for sent in tweet_sents:
sw = word_tokenize(sent)
for word in sw:
if word not in stoplist:
if word not in words:
if word in words1:
words.append(word)
words_file.write(word + '\n')
else:
words1.append(word)
tweet_file.close()
if not test:
words_file.close() # COMMENT OUT FOR TESTING
# USE THIS SECTION FOR TESTING
# extract all words (IN TESTING)
if test:
wfile = open('words-list.txt', 'r')
for line in wfile:
words.append(line.strip())
wfile.close()
# print some more information
print '\nNumber of tweets: ' + str(len(tweets))
print 'Number of words occurring >1 time: ' + str(len(words))
print 'Number of words occurring 1 time: ' + str(len(words1))
# create .arff file for Weka
texts = TextCollection(tweets)
arff = open('tweets_sentiment.arff', "w")
wc = 0
# header
arff.write("@relation sentiment_analysis\n\n")
arff.write("@attribute numPosEmots numeric\n")
arff.write("@attribute numNegEmots numeric\n")
arff.write("@attribute numQuest numeric\n")
arff.write("@attribute numExclam numeric\n")
arff.write("@attribute numPosGaz numeric\n")
arff.write("@attribute numNegGaz numeric\n")
for word in words:
arff.write("@attribute word_")
sub_w = re.subn('[^a-zA-Z]', 'X', word)
arff.write(sub_w[0])
if sub_w[1] > 0:
arff.write('_' + str(wc))
wc += 1
arff.write(" numeric\n")
arff.write("@attribute class {POS, NEG, OTHER}\n\n")
arff.write("@data\n")
# data
for i in xrange(len(tweets)):
arff.write(str(emots_count[i][0]) + ',' + str(emots_count[i][1]) + ',')
arff.write(str(punct_count[i][0]) + ',' + str(punct_count[i][1]) + ',')
arff.write(str(gaz_count[i][0]) + ',' + str(gaz_count[i][1]) + ',')
for j in xrange(len(words)): #loop through unigrams
arff.write(str(texts.tf_idf(words[j], tweets[i])) + ',')
arff.write(sentiments[i] + '\n')
arff.close()
print '\nFinished pre-processing! The ARFF file for Weka has been created.'<|fim▁end|> | emots_neg = [emot.lower() for emot in emots_neg]
gaz_pos = []
|
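A quick aside on the tf-idf features written in the @data section above: the per-tweet values come straight from NLTK's TextCollection. Below is a minimal, self-contained sketch using a made-up toy corpus (the strings and terms are illustrative only, not taken from the dataset):

from nltk.text import TextCollection

# Build the collection the same way preprocess.py does, from raw tweet strings.
toy_tweets = ['i love this phone', 'i hate this phone', 'this phone is ok']
toy_texts = TextCollection(toy_tweets)

# tf_idf(term, text) is the value written per unigram per tweet into the ARFF rows.
print toy_texts.tf_idf('love', toy_tweets[0])
print toy_texts.tf_idf('phone', toy_tweets[0])  # 0.0: 'phone' occurs in every document, so its idf is 0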
<|file_name|>sendControllerSpec.js<|end_file_name|><|fim▁begin|>'use strict';
var expect = chai.expect;
function run(scope,done) {
done();
}
describe('SendCtrl', function(){
var rootScope, scope, controller_injector, dependencies, ctrl,
sendForm, network, timeout, spy, stub, mock, res, transaction, data;
beforeEach(module("rp"));
beforeEach(inject(function($rootScope, $controller, $q, $timeout, rpNetwork) {
network = rpNetwork;
rootScope = $rootScope;
timeout = $timeout;
scope = $rootScope.$new();
scope.currencies_all = [{ name: 'XRP - Ripples', value: 'XRP'}];
controller_injector = $controller;
// Stub the sendForm, which should perhaps be tested using
// End To End tests
scope.sendForm = {
send_destination: {
$setValidity: function(){}
},
$setPristine: function(){},
$setValidity: function(){}
};
scope.$apply = function(func){func()};
scope.saveAddressForm = {
$setPristine: function () {}
}
scope.check_dt_visibility = function () {};
dependencies = {
$scope: scope,
$element: null,
$network: network,
rpId: {
loginStatus: true,
account: 'r4EwBWxrx5HxYRyisfGzMto3AT8FZiYdWk'
}
}
ctrl = controller_injector("SendCtrl", dependencies);
}));
it('should be initialized with defaults', function (done) {
assert.equal(scope.mode, 'form');
assert.isObject(scope.send);
assert.equal(scope.send.currency, 'XRP - Ripples');
assert.isFalse(scope.show_save_address_form);
assert.isFalse(scope.addressSaved);
assert.equal(scope.saveAddressName, '');
assert.isFalse(scope.addressSaving);
done()
});
it('should reset destination dependencies', function (done) {
assert.isFunction(scope.reset_destination_deps);
done();
});
describe('updating the destination', function (done) {
beforeEach(function () {
scope.send.recipient_address = 'r4EwBWxrx5HxYRyisfGzMto3AT8FZiYdWk';
})
it('should have a function to do so', function (done) {
assert.isFunction(scope.update_destination);
done();
});
describe('when the recipient is the same as last time', function (done) {
beforeEach(function () {
scope.send.last_recipient = scope.send.recipient_address;
});
it('should not reset destination dependencies', function (done) {
spy = sinon.spy(scope, 'reset_destination_deps');
scope.update_destination();
assert(spy.notCalled);
done();
});
it('should not check destination tag visibility', function (done) {
spy = sinon.spy(scope, 'check_dt_visibility');
scope.update_destination();
assert(spy.notCalled);
done();
});
});
describe('when the recipient is new', function (done) {
beforeEach(function () {
scope.send.last_recipient = null;
});
it('should reset destination dependencies', function (done) {
spy = sinon.spy(scope, 'reset_destination_deps');
scope.update_destination();
assert(spy.called);
done();
});
it('should check destination tag visibility', function (done) {
spy = sinon.spy(scope, 'check_dt_visibility');
scope.update_destination();
assert(spy.called);
done();
});
});
})
describe('updating the destination remote', function (done) {
it('should have a function to do so', function (done) {
assert.isFunction(scope.update_destination_remote);
done();
});
it('should validate the federation field by default', function (done) {
var setValiditySpy = sinon.spy(
scope.sendForm.send_destination, '$setValidity');
scope.update_destination_remote();
assert(setValiditySpy.withArgs('federation', true).called);
done();
})
describe('when it is not bitcoin', function (done) {
beforeEach(function () {
scope.send.bitcoin = null
})
it('should check destination', function (done) {
var spy = sinon.spy(scope, 'check_destination');
scope.update_destination_remote();
assert(spy.calledOnce);
done();
});
});
describe('when it is bitcoin', function (done) {
beforeEach(function () {
scope.send.bitcoin = true;
});
it('should update currency constraints', function (done) {
var spy = sinon.spy(scope, 'update_currency_constraints');
scope.update_destination_remote();
spy.should.have.been.calledOnce;
done();
});
it('should not check destination', function (done) {
var spy = sinon.spy(scope, 'check_destination');
scope.update_destination_remote();
assert(!spy.called);
done();
});
})
})
it('should check the destination', function (done) {
assert.isFunction(scope.check_destination);
done();
})
it('should handle paths', function (done) {
assert.isFunction(scope.handle_paths);
done();
});
it('should update paths', function (done) {
assert.isFunction(scope.update_paths);
done();
});
describe('updating currency constraints', function () {
it('should have a function to do so', function (done) {
assert.isFunction(scope.update_currency_constraints);
done();
});
it('should update the currency', function (done) {
stub = sinon.stub(scope, 'update_currency');
scope.update_currency_constraints();
assert(stub.called);
done();
});
describe('when recipient info is not loaded', function () {
it('should not update the currency', function (done) {
stub = sinon.stub(scope, 'update_currency');
scope.send.recipient_info.loaded = false;
scope.update_currency_constraints();
assert(stub.notCalled);
done();
});
});
});
it('should reset the currency dependencies', function (done) {
assert.isFunction(scope.reset_currency_deps);
var spy = sinon.spy(scope, 'reset_amount_deps');
scope.reset_currency_deps();
assert(spy.calledOnce);
done();
});
it('should update the currency', function (done) {
assert.isFunction(scope.update_currency);
done();
});
describe('resetting the amount dependencies', function (done) {
it('should have a function to do so', function (done) {
assert.isFunction(scope.reset_amount_deps);
done();
});
it('should set the quote to false', function (done) {
scope.send.quote = true;
scope.reset_amount_deps();
assert.isFalse(scope.send.quote);
done();
});
it('should falsify the sender insufficient xrp flag', function (done) {
scope.send.sender_insufficient_xrp = true;
scope.reset_amount_deps();
assert.isFalse(scope.send.sender_insufficient_xrp);
done();
});
it('should reset the paths', function (done) {
spy = sinon.spy(scope, 'reset_paths');
scope.reset_amount_deps();
assert(spy.calledOnce);
done();
});
});
it('should update the amount', function (done) {
assert.isFunction(scope.update_amount);
done();
});
it('should update the quote', function (done) {
assert.isFunction(scope.update_quote);
done();
});
describe('resetting paths', function (done) {
it('should have a function to do so', function (done) {
assert.isFunction(scope.reset_paths);
done();
});
it('should set the send alternatives to an empty array', function (done) {
scope.send.alternatives = ['not_an_empty_array'];
scope.reset_paths();
assert(Array.isArray(scope.send.alternatives));
assert.equal(scope.send.alternatives.length, 0);
done();
});
});
it('should reset the paths', function (done) {
assert.isFunction(scope.reset_paths);
done();
});
it('should cancel the form', function (done) {
assert.isFunction(scope.cancelConfirm);
scope.send.alt = '';
scope.mode = null;
scope.cancelConfirm();
assert.equal(scope.mode, 'form');
assert.isNull(scope.send.alt);
done();
});
describe('resetting the address form', function () {
it('should have a function to do so', function (done) {
assert.isFunction(scope.resetAddressForm);
done();
});
it('should falsify show_save_address_form field', function (done) {
scope.show_save_address_form = true
scope.resetAddressForm();
assert.isFalse(scope.show_save_address_form);
done();
});
it('should falsify the addressSaved field', function (done) {
scope.addressSaved = true;
scope.resetAddressForm();
assert.isFalse(scope.addressSaved);
done();
});
it('should empty the saveAddressName field', function (done) {
scope.saveAddressName = null;
scope.resetAddressForm();
assert.equal(scope.saveAddressName, '');
done();
});
it('should falsify the addressSaving field', function (done) {
scope.addressSaving = true;
scope.resetAddressForm();
assert.isFalse(scope.addressSaving);
done();
});
it('should set the form to pristine state', function (done) {
spy = sinon.spy(scope.saveAddressForm, '$setPristine');
scope.resetAddressForm();
assert(spy.calledOnce);
done();
});
});
describe('performing reset goto', function () {
it('should have a function to do so', function (done) {
assert.isFunction(scope.reset_goto);
done();
});
it('should reset the scope', function (done) {
spy = sinon.spy(scope, 'reset');
scope.reset_goto();
assert(spy.calledOnce);
done();
});
it('should navigate the page to the tab name provided', function (done) {
var tabName = 'someAwesomeTab';
scope.reset_goto(tabName);
assert.equal(document.location.hash, '#' + tabName);
done();
});
})
it('should perform a reset goto', function (done) {
var mock = sinon.mock(scope);
mock.expects('reset').once();
scope.reset_goto();
mock.verify();
done();
});
describe("handling when the send is prepared", function () {
it('should have a function to do so', function (done) {
assert.isFunction(scope.send_prepared);
done();
});
it('should set confirm wait to true', function (done) {
scope.send_prepared();
assert.isTrue(scope.confirm_wait);
done();
});
it("should set the mode to 'confirm'", function (done) {
assert.notEqual(scope.mode, 'confirm');
scope.send_prepared();
assert.equal(scope.mode, 'confirm');
done();
})
it('should set confirm_wait to false after a timeout', function (done) {
scope.send_prepared();
assert.isTrue(scope.confirm_wait);
// For some reason $timeout.flush() works but then raises an exception
try { timeout.flush() }
catch (e) {}
assert.isFalse(scope.confirm_wait);
done();
});
});
describe('handling when a transaction send is confirmed', function (done) {
beforeEach(function () {
scope.send.recipient_address = 'r4EwBWxrx5HxYRyisfGzMto3AT8FZiYdWk';
});
describe("handling a 'propose' event from ripple-lib", function (done) {
beforeEach(function () {
scope.send = {
amount_feedback: {
currency: function () {
function to_human () {
return 'somestring';
}
return { to_human: to_human }
}
}
}
transaction = {
hash: 'E64165A4ED2BF36E5922B11C4E192DF068E2ADC21836087DE5E0B1FDDCC9D82F'
}
res = {
engine_result: 'arbitrary_engine_result',
engine_result_message: 'arbitrary_engine_result_message'
}
});
it('should call sent() with the transaction hash', function (done) {
spy = sinon.spy(scope, 'sent');
scope.onTransactionProposed(res, transaction);
assert(spy.calledWith(transaction.hash));
done();
});
it('should set the engine status with the response', function (done) {
spy = sinon.spy(scope, 'setEngineStatus');
scope.onTransactionProposed(res, transaction);
assert(spy.called);
done();
});
});
describe("handling errors from the server", function () {
describe("any error", function (done) {
it('should set the mode to error', function (done) {
var res = { error: null };
scope.onTransactionError(res, null);
setTimeout(function (){
assert.equal(scope.mode, "error");
done();
}, 10)
});
});
});
it('should have a function to handle send confirmed', function (done) {
assert.isFunction(scope.send_confirmed);
done();
});
it('should create a transaction', function (done) {
spy = sinon.spy(network.remote, 'transaction');
scope.send_confirmed();
assert(spy.called);
done();
});
})
describe('saving an address', function () {
beforeEach(function () {
scope.userBlob = {
data: {
contacts: []
}
};
});
it('should have a function to do so', function (done) {
assert.isFunction(scope.saveAddress);
done();
});
it("should set the addressSaving property to true", function (done) {
assert.isFalse(scope.addressSaving);
scope.saveAddress();
assert.isTrue(scope.addressSaving);
done();
})
it("should listen for blobSave event", function (done) {
var onBlobSaveSpy = sinon.spy(scope, '$on');
scope.saveAddress();
assert(onBlobSaveSpy.withArgs('$blobSave').calledOnce);
done();
});
it("should add the contact to the blob's contacts", function (done) {
assert(scope.userBlob.data.contacts.length == 0);
scope.saveAddress();
assert(scope.userBlob.data.contacts.length == 1);
done();
});
describe('handling a blobSave event', function () {
describe('having called saveAddress', function () {
beforeEach(function () {
scope.saveAddress();
});
it('should set addressSaved to true', function (done) {
assert.isFalse(scope.addressSaved);
scope.$emit('$blobSave');
assert.isTrue(scope.addressSaved);
done();
});
it("should set the contact as the scope's contact", function (done) {
assert.isUndefined(scope.contact);
scope.$emit('$blobSave');
assert.isObject(scope.contact);
done();
});
})
describe('without having called saveAddress', function () {
it('should not set addressSaved', function (done) {
assert.isFalse(scope.addressSaved);
scope.$emit('$blobSave');
assert.isFalse(scope.addressSaved);
done();
});
})
})
});
describe('setting engine status', function () {
beforeEach(function () {
res = {
engine_result: 'arbitrary_engine_result',
engine_result_message: 'arbitrary_engine_result_message'
}
});
describe("when the response code is 'tes'", function() {
beforeEach(function () {
res.engine_result = 'tes';
})
describe('when the transaction is accepted', function () {
it("should set the transaction result to cleared", function (done) {
var accepted = true;
scope.setEngineStatus(res, accepted);
assert.equal(scope.tx_result, 'cleared');
done();
});
});
describe('when the transaction not accepted', function () {
it("should set the transaction result to pending", function (done) {
var accepted = false;
scope.setEngineStatus(res, accepted);
assert.equal(scope.tx_result, 'pending');
done();
});
});
});
describe("when the response code is 'tep'", function() {
beforeEach(function () {
res.engine_result = 'tep';
})
it("should set the transaction result to partial", function (done) {
scope.setEngineStatus(res, true);
assert.equal(scope.tx_result, 'partial');
done();
});
});
});
describe('handling sent transactions', function () {
it('should update the mode to status', function (done) {
assert.isFunction(scope.sent);
assert.equal(scope.mode, 'form');
scope.sent();
assert.equal(scope.mode, 'status');
done();
})
it('should listen for transactions on the network', function (done) {
var remoteListenerSpy = sinon.spy(network.remote, 'on');
scope.sent();
assert(remoteListenerSpy.calledWith('transaction'));
done();
})
describe('handling a transaction event', function () {
beforeEach(function () {
var hash = 'testhash';
scope.sent(hash);
data = {<|fim▁hole|> hash: hash
}
}
stub = sinon.stub(scope, 'setEngineStatus');
});
afterEach(function () {
scope.setEngineStatus.restore();
})
it('should set the engine status', function (done) {
network.remote.emit('transaction', data);
assert(stub.called);
done();
});
it('should stop listening for transactions', function (done) {
spy = sinon.spy(network.remote, 'removeListener');
network.remote.emit('transaction', data);
assert(spy.called);
done();
})
})
})
});<|fim▁end|> | transaction: { |
<|file_name|>adding_prob_src.rs<|end_file_name|><|fim▁begin|>extern crate rand;
use rand::distributions::{Range, IndependentSample};
use af;
use af::{Array, Dim4, MatProp, DType};
use std::cell::{RefCell, Cell};
use initializations::uniform;
use utils;
use data::{Data, DataSource, DataParams, Normalize, Shuffle};
pub struct AddingProblemSource {
pub params: DataParams,
pub iter: Cell<u64>,
pub offset: Cell<f32>,
pub bptt_unroll : u64,
}
impl AddingProblemSource {
pub fn new(batch_size: u64, bptt_unroll: u64, dtype: DType, max_samples: u64) -> AddingProblemSource
{
assert!(bptt_unroll % 4 == 0, "The number of time steps has to be divisible by 4 for the adding problem");
let input_dims = Dim4::new(&[batch_size, 1, bptt_unroll, 1]);
let target_dims = Dim4::new(&[batch_size, 1, bptt_unroll, 1]);
let train_samples = 0.7 * max_samples as f32;
let test_samples = 0.2 * max_samples as f32;
let validation_samples = 0.1 * max_samples as f32;
AddingProblemSource {
params : DataParams {
input_dims: input_dims,
target_dims: target_dims,
dtype: dtype,
normalize: false,
shuffle: false,
current_epoch: Cell::new(0),
num_samples: max_samples,
num_train: train_samples as u64,
num_test: test_samples as u64,
num_validation: Some(validation_samples as u64),
},
iter: Cell::new(0),
offset: Cell::new(0.0f32),
bptt_unroll: bptt_unroll,
}
}
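// generate_input builds one adding-problem batch: the first half of the time
// dimension holds uniform random values in [-1, 1]; the second half is a
// marker channel with exactly two 1s per sequence (one in each half of that
// segment) indicating which two values have to be added.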
fn generate_input(&self, batch_size: u64, bptt_unroll: u64) -> Array {
let dim1 = Dim4::new(&[batch_size, 1, bptt_unroll/2, 1]);
let ar1 = uniform::<f32>(dim1,-1.0, 1.0);
let between1 = Range::new(0, bptt_unroll/4);
let between2 = Range::new(bptt_unroll/4, bptt_unroll/2);
let mut rng1 = rand::thread_rng();
let mut rng2 = rand::thread_rng();
let mut vec_total = Vec::with_capacity((batch_size*bptt_unroll) as usize);
let vec_zeros = vec!(0f32; (bptt_unroll/2) as usize);
for _ in 0..batch_size {
let index1 = between1.ind_sample(&mut rng1) as usize;
let index2 = between2.ind_sample(&mut rng2) as usize;
let mut vec_temp = vec_zeros.clone();
vec_temp[index1] = 1f32;
vec_temp[index2] = 1f32;
vec_total.extend(vec_temp);
}
let dim2 = Dim4::new(&[bptt_unroll/2, batch_size, 1, 1]);
let ar2 = af::moddims(&af::transpose(&utils::vec_to_array::<f32>(vec_total, dim2), false), dim1);
af::join(2, &ar1, &ar2)
}
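// generate_target multiplies the value half by the marker half and sums over
// time, so the target is the sum of the two marked random values, broadcast
// across every time step.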
fn generate_target(&self, input: &Array, batch_size: u64, bptt_unroll: u64) -> Array {
let first = af::slices(&input, 0, bptt_unroll/2-1);
let second = af::slices(&input, bptt_unroll/2, bptt_unroll-1);
let ar = af::mul(&first, &second, false);
let zeros = af::constant(0f32, Dim4::new(&[batch_size, 1, bptt_unroll, 1]));
af::add(&af::sum(&ar, 2), &zeros, true)
}
}
impl DataSource for AddingProblemSource {
fn get_train_iter(&self, num_batch: u64) -> Data {
let inp = self.generate_input(num_batch
, self.params.input_dims[2]);
let tar = self.generate_target(&inp
, num_batch
, self.params.input_dims[2]);
let batch = Data {
input: RefCell::new(Box::new(inp.clone())),
target: RefCell::new(Box::new(tar.clone())),
};
let current_iter = self.params.current_epoch.get();
if self.iter.get() == self.params.num_samples as u64/ num_batch as u64 {
self.params.current_epoch.set(current_iter + 1);
self.iter.set(0);
}
self.iter.set(self.iter.get() + 1);
batch
}
fn info(&self) -> DataParams {
self.params.clone()
}
fn get_test_iter(&self, num_batch: u64) -> Data {<|fim▁hole|> }
fn get_validation_iter(&self, num_batch: u64) -> Option<Data> {
Some( self.get_train_iter(num_batch))
}
}<|fim▁end|> | self.get_train_iter(num_batch) |
<|file_name|>plugin.py<|end_file_name|><|fim▁begin|># Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mandeep Dhami, Big Switch Networks, Inc.
# @author: Sumit Naiksatam, [email protected], Big Switch Networks, Inc.
"""
Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers.
NeutronRestProxy provides a generic neutron plugin that translates all plugin
function calls to equivalent authenticated REST calls to a set of redundant
external network controllers. It also keeps a persistent store for all neutron
state to allow for re-sync of the external controller(s), if required.
The local state on the plugin also allows for local response and fast-fail
semantics where it can be determined based on the local persistent store.
Network controller specific code is decoupled from this plugin and expected
to reside on the controller itself (via the REST interface).
This allows for:
- independent authentication and redundancy schemes between neutron and the
network controller
- independent upgrade/development cycles between neutron and the controller
as it limits the proxy code upgrade requirement to neutron release cycle
and the controller specific code upgrade requirement to controller code
- ability to sync the controller with neutron for independent recovery/reset
External REST API used by proxy is the same API as defined for neutron (JSON
subset) with some additional parameters (gateway on network-create and macaddr
on port-attach) on an additional PUT to do a bulk dump of all persistent data.
"""
import copy
import functools
import httplib
import re
import eventlet
from oslo.config import cfg
from sqlalchemy.orm import exc as sqlexc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api import extensions as neutron_extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context as qcontext
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.db import securitygroups_rpc_base as sg_rpc_base
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import external_net
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.bigswitch import config as pl_config
from neutron.plugins.bigswitch.db import porttracker_db
from neutron.plugins.bigswitch import extensions
from neutron.plugins.bigswitch import routerrule_db
from neutron.plugins.bigswitch import servermanager
from neutron.plugins.bigswitch import version
LOG = logging.getLogger(__name__)
SYNTAX_ERROR_MESSAGE = _('Syntax error in server config file, aborting plugin')
METADATA_SERVER_IP = '169.254.169.254'
class AgentNotifierApi(n_rpc.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin):
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic_port_update = topics.get_topic_name(
topic, topics.PORT, topics.UPDATE)
def port_update(self, context, port):
self.fanout_cast(context,
self.make_msg('port_update',
port=port),
topic=self.topic_port_update)
class RestProxyCallbacks(n_rpc.RpcCallback,
sg_rpc_base.SecurityGroupServerRpcCallbackMixin,
dhcp_rpc_base.DhcpRpcCallbackMixin):
RPC_API_VERSION = '1.1'
def get_port_from_device(self, device):
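# Agents report devices as 'tap' plus a truncated port id; strip the prefix
# here and look the port up by id prefix in get_port_and_sgs().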
port_id = re.sub(r"^tap", "", device)
port = self.get_port_and_sgs(port_id)
if port:
port['device'] = device
return port
def get_port_and_sgs(self, port_id):
"""Get port from database with security group info."""
LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id)
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
with session.begin(subtransactions=True):
query = session.query(
models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id
)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id.startswith(port_id))
port_and_sgs = query.all()
if not port_and_sgs:
return
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict['security_groups'] = [
sg_id for port_, sg_id in port_and_sgs if sg_id]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
routerrule_db.RouterRule_db_mixin):
supported_extension_aliases = ["binding"]
servers = None
def _get_all_data(self, get_ports=True, get_floating_ips=True,
get_routers=True):
admin_context = qcontext.get_admin_context()
networks = []
# this method is used by the ML2 driver so it can't directly invoke
# the self.get_(ports|networks) methods
plugin = manager.NeutronManager.get_plugin()
all_networks = plugin.get_networks(admin_context) or []
for net in all_networks:
mapped_network = self._get_mapped_network_with_subnets(net)
flips_n_ports = mapped_network
if get_floating_ips:
flips_n_ports = self._get_network_with_floatingips(
mapped_network)
if get_ports:
ports = []
net_filter = {'network_id': [net.get('id')]}
net_ports = plugin.get_ports(admin_context,
filters=net_filter) or []
for port in net_ports:
mapped_port = self._map_state_and_status(port)
mapped_port['attachment'] = {
'id': port.get('device_id'),
'mac': port.get('mac_address'),
}
mapped_port = self._extend_port_dict_binding(admin_context,
mapped_port)
ports.append(mapped_port)
flips_n_ports['ports'] = ports
if flips_n_ports:
networks.append(flips_n_ports)
data = {'networks': networks}
if get_routers:
routers = []
all_routers = self.get_routers(admin_context) or []
for router in all_routers:
interfaces = []
mapped_router = self._map_state_and_status(router)
router_filter = {
'device_owner': [const.DEVICE_OWNER_ROUTER_INTF],
'device_id': [router.get('id')]
}
router_ports = self.get_ports(admin_context,
filters=router_filter) or []
for port in router_ports:
net_id = port.get('network_id')
subnet_id = port['fixed_ips'][0]['subnet_id']
intf_details = self._get_router_intf_details(admin_context,
net_id,
subnet_id)
interfaces.append(intf_details)
mapped_router['interfaces'] = interfaces
routers.append(mapped_router)
data.update({'routers': routers})
return data
def _send_all_data(self, send_ports=True, send_floating_ips=True,
send_routers=True, timeout=None,
triggered_by_tenant=None):
"""Pushes all data to network ctrl (networks/ports, ports/attachments).
This gives the controller an option to re-sync its persistent store
with neutron's current view of that data.
"""
data = self._get_all_data(send_ports, send_floating_ips, send_routers)
data['triggered_by_tenant'] = triggered_by_tenant
errstr = _("Unable to update remote topology: %s")
return self.servers.rest_action('PUT', servermanager.TOPOLOGY_PATH,
data, errstr, timeout=timeout)
def _get_network_with_floatingips(self, network, context=None):
if context is None:
context = qcontext.get_admin_context()
net_id = network['id']
net_filter = {'floating_network_id': [net_id]}
fl_ips = self.get_floatingips(context,
filters=net_filter) or []
network['floatingips'] = fl_ips
return network
def _get_all_subnets_json_for_network(self, net_id, context=None):
if context is None:
context = qcontext.get_admin_context()
# start a sub-transaction to avoid breaking parent transactions
with context.session.begin(subtransactions=True):
subnets = self._get_subnets_by_network(context,
net_id)
subnets_details = []
if subnets:
for subnet in subnets:
subnet_dict = self._make_subnet_dict(subnet)
mapped_subnet = self._map_state_and_status(subnet_dict)
subnets_details.append(mapped_subnet)
return subnets_details
def _get_mapped_network_with_subnets(self, network, context=None):
# if context is not provided, admin context is used
if context is None:
context = qcontext.get_admin_context()
network = self._map_state_and_status(network)
subnets = self._get_all_subnets_json_for_network(network['id'],
context)
network['subnets'] = subnets
for subnet in (subnets or []):
if subnet['gateway_ip']:
# FIX: For backward compatibility with wire protocol
network['gateway'] = subnet['gateway_ip']
break
else:
network['gateway'] = ''
network[external_net.EXTERNAL] = self._network_is_external(
context, network['id'])
# include ML2 segmentation types
network['segmentation_types'] = getattr(self, "segmentation_types", "")
return network
def _send_create_network(self, network, context=None):
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
self.servers.rest_create_network(tenant_id, mapped_network)
def _send_update_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
net_fl_ips = self._get_network_with_floatingips(mapped_network,
context)
self.servers.rest_update_network(tenant_id, net_id, net_fl_ips)
def _send_delete_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
self.servers.rest_delete_network(tenant_id, net_id)
<|fim▁hole|> resource['state'] = ('UP' if resource.pop('admin_state_up',
True) else 'DOWN')
resource.pop('status', None)
return resource
def _warn_on_state_status(self, resource):
if resource.get('admin_state_up', True) is False:
LOG.warning(_("Setting admin_state_up=False is not supported "
"in this plugin version. Ignoring setting for "
"resource: %s"), resource)
if 'status' in resource:
if resource['status'] != const.NET_STATUS_ACTIVE:
LOG.warning(_("Operational status is internally set by the "
"plugin. Ignoring setting status=%s."),
resource['status'])
def _get_router_intf_details(self, context, intf_id, subnet_id):
# we will use the network id as interface's id
net_id = intf_id
network = self.get_network(context, net_id)
subnet = self.get_subnet(context, subnet_id)
mapped_network = self._get_mapped_network_with_subnets(network)
mapped_subnet = self._map_state_and_status(subnet)
data = {
'id': intf_id,
"network": mapped_network,
"subnet": mapped_subnet
}
return data
def _extend_port_dict_binding(self, context, port):
cfg_vif_type = cfg.CONF.NOVA.vif_type.lower()
if cfg_vif_type not in (portbindings.VIF_TYPE_OVS,
portbindings.VIF_TYPE_IVS):
LOG.warning(_("Unrecognized vif_type in configuration "
"[%s]. Defaulting to ovs."),
cfg_vif_type)
cfg_vif_type = portbindings.VIF_TYPE_OVS
# In ML2, the host_id is already populated
if portbindings.HOST_ID in port:
hostid = port[portbindings.HOST_ID]
else:
hostid = porttracker_db.get_port_hostid(context, port['id'])
if hostid:
port[portbindings.HOST_ID] = hostid
override = self._check_hostvif_override(hostid)
if override:
cfg_vif_type = override
port[portbindings.VIF_TYPE] = cfg_vif_type
port[portbindings.VIF_DETAILS] = {
# TODO(rkukura): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases,
portbindings.OVS_HYBRID_PLUG: True
}
return port
def _check_hostvif_override(self, hostid):
for v in cfg.CONF.NOVA.vif_types:
if hostid in getattr(cfg.CONF.NOVA, "node_override_vif_" + v, []):
return v
return False
def _get_port_net_tenantid(self, context, port):
net = super(NeutronRestProxyV2Base,
self).get_network(context, port["network_id"])
return net['tenant_id']
def async_port_create(self, tenant_id, net_id, port):
try:
self.servers.rest_create_port(tenant_id, net_id, port)
except servermanager.RemoteRestError as e:
# 404 should never be received on a port create unless
# there are inconsistencies between the data in neutron
# and the data in the backend.
# Run a sync to get it consistent.
if (cfg.CONF.RESTPROXY.auto_sync_on_failure and
e.status == httplib.NOT_FOUND and
servermanager.NXNETWORK in e.reason):
LOG.error(_("Iconsistency with backend controller "
"triggering full synchronization."))
# args depend on if we are operating in ML2 driver
# or as the full plugin
topoargs = self.servers.get_topo_function_args
self._send_all_data(
send_ports=topoargs['get_ports'],
send_floating_ips=topoargs['get_floating_ips'],
send_routers=topoargs['get_routers'],
triggered_by_tenant=tenant_id
)
# If the full sync worked, the port will be created
# on the controller so it can be safely marked as active
else:
# Any errors that don't result in a successful auto-sync
# require that the port be placed into the error state.
LOG.error(
_("NeutronRestProxyV2: Unable to create port: %s"), e)
try:
self._set_port_status(port['id'], const.PORT_STATUS_ERROR)
except exceptions.PortNotFound:
# If port is already gone from DB and there was an error
# creating on the backend, everything is already consistent
pass
return
new_status = (const.PORT_STATUS_ACTIVE if port['state'] == 'UP'
else const.PORT_STATUS_DOWN)
try:
self._set_port_status(port['id'], new_status)
except exceptions.PortNotFound:
# This port was deleted before the create made it to the controller
# so it now needs to be deleted since the normal delete request
# would have deleted an non-existent port.
self.servers.rest_delete_port(tenant_id, net_id, port['id'])
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
def _set_port_status(self, port_id, status):
session = db.get_session()
try:
port = session.query(models_v2.Port).filter_by(id=port_id).one()
port['status'] = status
session.flush()
except sqlexc.NoResultFound:
raise exceptions.PortNotFound(port_id=port_id)
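# Decorator that stashes the current request context on the REST server pool
# before invoking the wrapped plugin method, so the outgoing proxy calls have
# that context available.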
def put_context_in_serverpool(f):
@functools.wraps(f)
def wrapper(self, context, *args, **kwargs):
self.servers.set_context(context)
return f(self, context, *args, **kwargs)
return wrapper
class NeutronRestProxyV2(NeutronRestProxyV2Base,
addr_pair_db.AllowedAddressPairsMixin,
extradhcpopt_db.ExtraDhcpOptMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
sg_rpc_base.SecurityGroupServerRpcMixin):
_supported_extension_aliases = ["external-net", "router", "binding",
"router_rules", "extra_dhcp_opt", "quotas",
"dhcp_agent_scheduler", "agent",
"security-group", "allowed-address-pairs"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_by_config(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
super(NeutronRestProxyV2, self).__init__()
LOG.info(_('NeutronRestProxy: Starting plugin. Version=%s'),
version.version_string_with_vcs())
pl_config.register_config()
self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)
# Include the Big Switch Extensions path in the api_extensions
neutron_extensions.append_api_extensions_path(extensions.__path__)
self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route
# init network ctrl connections
self.servers = servermanager.ServerPool()
self.servers.get_topo_function = self._get_all_data
self.servers.get_topo_function_args = {'get_ports': True,
'get_floating_ips': True,
'get_routers': True}
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
# setup rpc for security and DHCP agents
self._setup_rpc()
if cfg.CONF.RESTPROXY.sync_data:
self._send_all_data()
LOG.debug(_("NeutronRestProxyV2: initialization done"))
def _setup_rpc(self):
self.conn = n_rpc.create_connection(new=True)
self.topic = topics.PLUGIN
self.notifier = AgentNotifierApi(topics.AGENT)
# init dhcp agent support
self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
self._dhcp_agent_notifier
)
self.endpoints = [RestProxyCallbacks(),
agents_db.AgentExtRpcCallback()]
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
# Consume from all consumers in threads
self.conn.consume_in_threads()
@put_context_in_serverpool
def create_network(self, context, network):
"""Create a network.
Network represents an L2 network segment which can have a set of
subnets and ports associated with it.
:param context: neutron api request context
:param network: dictionary describing the network
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can specify
a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: create_network() called"))
self._warn_on_state_status(network['network'])
with context.session.begin(subtransactions=True):
self._ensure_default_security_group(
context,
network['network']["tenant_id"]
)
# create network in DB
new_net = super(NeutronRestProxyV2, self).create_network(context,
network)
self._process_l3_create(context, new_net, network['network'])
# create network on the network controller
self._send_create_network(new_net, context)
# return created network
return new_net
@put_context_in_serverpool
def update_network(self, context, net_id, network):
"""Updates the properties of a particular Virtual Network.
:param context: neutron api request context
:param net_id: uuid of the network to update
:param network: dictionary describing the updates
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can
specify a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2.update_network() called"))
self._warn_on_state_status(network['network'])
session = context.session
with session.begin(subtransactions=True):
new_net = super(NeutronRestProxyV2, self).update_network(
context, net_id, network)
self._process_l3_update(context, new_net, network['network'])
# update network on network controller
self._send_update_network(new_net, context)
return new_net
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_network(self, context, net_id):
"""Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
:returns: None
:raises: exceptions.NetworkInUse
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: delete_network() called"))
# Validate args
orig_net = super(NeutronRestProxyV2, self).get_network(context, net_id)
with context.session.begin(subtransactions=True):
self._process_l3_delete(context, net_id)
ret_val = super(NeutronRestProxyV2, self).delete_network(context,
net_id)
self._send_delete_network(orig_net, context)
return ret_val
@put_context_in_serverpool
def create_port(self, context, port):
"""Create a port, which is a connection point of a device
(e.g., a VM NIC) to attach to a L2 Neutron network.
:param context: neutron api request context
:param port: dictionary describing the port
:returns:
{
"id": uuid represeting the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": Sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet ID"s and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.NetworkNotFound
:raises: exceptions.StateInvalid
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: create_port() called"))
# Update DB in new session so exceptions rollback changes
with context.session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
# non-router port status is set to pending. it is then updated
# after the async rest call completes. router ports are synchronous
if port['port']['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
port['port']['status'] = const.PORT_STATUS_ACTIVE
else:
port['port']['status'] = const.PORT_STATUS_BUILD
dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
new_port = super(NeutronRestProxyV2, self).create_port(context,
port)
self._process_port_create_security_group(context, new_port, sgids)
if (portbindings.HOST_ID in port['port']
and 'id' in new_port):
host_id = port['port'][portbindings.HOST_ID]
porttracker_db.put_port_hostid(context, new_port['id'],
host_id)
new_port[addr_pair.ADDRESS_PAIRS] = (
self._process_create_allowed_address_pairs(
context, new_port,
port['port'].get(addr_pair.ADDRESS_PAIRS)))
self._process_port_create_extra_dhcp_opts(context, new_port,
dhcp_opts)
new_port = self._extend_port_dict_binding(context, new_port)
net = super(NeutronRestProxyV2,
self).get_network(context, new_port["network_id"])
if self.add_meta_server_route:
if new_port['device_owner'] == const.DEVICE_OWNER_DHCP:
destination = METADATA_SERVER_IP + '/32'
self._add_host_route(context, destination, new_port)
# create on network ctrl
mapped_port = self._map_state_and_status(new_port)
# ports have to be created synchronously when creating a router
# port since adding router interfaces is a multi-call process
if mapped_port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF:
self.servers.rest_create_port(net["tenant_id"],
new_port["network_id"],
mapped_port)
else:
self.evpool.spawn_n(self.async_port_create, net["tenant_id"],
new_port["network_id"], mapped_port)
self.notify_security_groups_member_updated(context, new_port)
return new_port
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
port = super(NeutronRestProxyV2, self).get_port(context, id,
fields)
self._extend_port_dict_binding(context, port)
return self._fields(port, fields)
def get_ports(self, context, filters=None, fields=None):
with context.session.begin(subtransactions=True):
ports = super(NeutronRestProxyV2, self).get_ports(context, filters,
fields)
for port in ports:
self._extend_port_dict_binding(context, port)
return [self._fields(port, fields) for port in ports]
@put_context_in_serverpool
def update_port(self, context, port_id, port):
"""Update values of a port.
:param context: neutron api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
:returns: a mapping sequence with the following signature:
{
"id": uuid represeting the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet ID's and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.StateInvalid
:raises: exceptions.PortNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: update_port() called"))
self._warn_on_state_status(port['port'])
# Validate Args
orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id)
with context.session.begin(subtransactions=True):
# Update DB
new_port = super(NeutronRestProxyV2,
self).update_port(context, port_id, port)
ctrl_update_required = False
if addr_pair.ADDRESS_PAIRS in port['port']:
ctrl_update_required |= (
self.update_address_pairs_on_port(context, port_id, port,
orig_port, new_port))
self._update_extra_dhcp_opts_on_port(context, port_id, port,
new_port)
old_host_id = porttracker_db.get_port_hostid(context,
orig_port['id'])
if (portbindings.HOST_ID in port['port']
and 'id' in new_port):
host_id = port['port'][portbindings.HOST_ID]
porttracker_db.put_port_hostid(context, new_port['id'],
host_id)
if old_host_id != host_id:
ctrl_update_required = True
if (new_port.get("device_id") != orig_port.get("device_id") and
orig_port.get("device_id")):
ctrl_update_required = True
if ctrl_update_required:
# tenant_id must come from network in case network is shared
net_tenant_id = self._get_port_net_tenantid(context, new_port)
new_port = self._extend_port_dict_binding(context, new_port)
mapped_port = self._map_state_and_status(new_port)
self.servers.rest_update_port(net_tenant_id,
new_port["network_id"],
mapped_port)
agent_update_required = self.update_security_group_on_port(
context, port_id, port, orig_port, new_port)
agent_update_required |= self.is_security_group_member_updated(
context, orig_port, new_port)
# return new_port
return new_port
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_port(self, context, port_id, l3_port_check=True):
"""Delete a port.
:param context: neutron api request context
:param id: UUID representing the port to delete.
:raises: exceptions.PortInUse
:raises: exceptions.PortNotFound
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: delete_port() called"))
# if needed, check to see if this is a port owned by
# an l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, port_id)
with context.session.begin(subtransactions=True):
router_ids = self.disassociate_floatingips(
context, port_id, do_notify=False)
self._delete_port_security_group_bindings(context, port_id)
port = super(NeutronRestProxyV2, self).get_port(context, port_id)
# Tenant ID must come from network in case the network is shared
tenid = self._get_port_net_tenantid(context, port)
self._delete_port(context, port_id)
self.servers.rest_delete_port(tenid, port['network_id'], port_id)
# now that we've left db transaction, we are safe to notify
self.notify_routers_updated(context, router_ids)
@put_context_in_serverpool
def create_subnet(self, context, subnet):
LOG.debug(_("NeutronRestProxyV2: create_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# create subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).create_subnet(context, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
@put_context_in_serverpool
def update_subnet(self, context, id, subnet):
LOG.debug(_("NeutronRestProxyV2: update_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# update subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).update_subnet(context, id, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_subnet(self, context, id):
LOG.debug(_("NeutronRestProxyV2: delete_subnet() called"))
orig_subnet = super(NeutronRestProxyV2, self).get_subnet(context, id)
net_id = orig_subnet['network_id']
with context.session.begin(subtransactions=True):
# delete subnet in DB
super(NeutronRestProxyV2, self).delete_subnet(context, id)
orig_net = super(NeutronRestProxyV2, self).get_network(context,
net_id)
# update network on network controller - exception will rollback
self._send_update_network(orig_net, context)
def _get_tenant_default_router_rules(self, tenant):
rules = cfg.CONF.ROUTER.tenant_default_router_rule
defaultset = []
tenantset = []
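# Each configured rule string is expected to look like
# "<tenant>:<source>:<destination>:<action>" optionally followed by
# ":<nexthop1>,<nexthop2>,..."; a tenant of '*' marks the default rule set.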
for rule in rules:
items = rule.split(':')
if len(items) == 5:
(tenantid, source, destination, action, nexthops) = items
elif len(items) == 4:
(tenantid, source, destination, action) = items
nexthops = ''
else:
continue
parsedrule = {'source': source,
'destination': destination, 'action': action,
'nexthops': nexthops.split(',')}
if parsedrule['nexthops'][0] == '':
parsedrule['nexthops'] = []
if tenantid == '*':
defaultset.append(parsedrule)
if tenantid == tenant:
tenantset.append(parsedrule)
if tenantset:
return tenantset
return defaultset
@put_context_in_serverpool
def create_router(self, context, router):
LOG.debug(_("NeutronRestProxyV2: create_router() called"))
self._warn_on_state_status(router['router'])
tenant_id = self._get_tenant_id_for_create(context, router["router"])
# set default router rules
rules = self._get_tenant_default_router_rules(tenant_id)
router['router']['router_rules'] = rules
with context.session.begin(subtransactions=True):
# create router in DB
new_router = super(NeutronRestProxyV2, self).create_router(context,
router)
mapped_router = self._map_state_and_status(new_router)
self.servers.rest_create_router(tenant_id, mapped_router)
# return created router
return new_router
@put_context_in_serverpool
def update_router(self, context, router_id, router):
LOG.debug(_("NeutronRestProxyV2.update_router() called"))
self._warn_on_state_status(router['router'])
orig_router = super(NeutronRestProxyV2, self).get_router(context,
router_id)
tenant_id = orig_router["tenant_id"]
with context.session.begin(subtransactions=True):
new_router = super(NeutronRestProxyV2,
self).update_router(context, router_id, router)
router = self._map_state_and_status(new_router)
# update router on network controller
self.servers.rest_update_router(tenant_id, router, router_id)
# return updated router
return new_router
# NOTE(kevinbenton): workaround for eventlet/mysql deadlock.
# delete_router ends up calling _delete_port instead of delete_port.
@utils.synchronized('bsn-port-barrier')
@put_context_in_serverpool
def delete_router(self, context, router_id):
LOG.debug(_("NeutronRestProxyV2: delete_router() called"))
with context.session.begin(subtransactions=True):
orig_router = self._get_router(context, router_id)
tenant_id = orig_router["tenant_id"]
# Ensure that the router is not used
router_filter = {'router_id': [router_id]}
fips = self.get_floatingips_count(context.elevated(),
filters=router_filter)
if fips:
raise l3.RouterInUse(router_id=router_id)
device_owner = l3_db.DEVICE_OWNER_ROUTER_INTF
device_filter = {'device_id': [router_id],
'device_owner': [device_owner]}
ports = self.get_ports_count(context.elevated(),
filters=device_filter)
if ports:
raise l3.RouterInUse(router_id=router_id)
ret_val = super(NeutronRestProxyV2,
self).delete_router(context, router_id)
# delete from network ctrl
self.servers.rest_delete_router(tenant_id, router_id)
return ret_val
@put_context_in_serverpool
def add_router_interface(self, context, router_id, interface_info):
LOG.debug(_("NeutronRestProxyV2: add_router_interface() called"))
# Validate args
router = self._get_router(context, router_id)
tenant_id = router['tenant_id']
with context.session.begin(subtransactions=True):
# create interface in DB
new_intf_info = super(NeutronRestProxyV2,
self).add_router_interface(context,
router_id,
interface_info)
port = self._get_port(context, new_intf_info['port_id'])
net_id = port['network_id']
subnet_id = new_intf_info['subnet_id']
# we will use the port's network id as interface's id
interface_id = net_id
intf_details = self._get_router_intf_details(context,
interface_id,
subnet_id)
# create interface on the network controller
self.servers.rest_add_router_interface(tenant_id, router_id,
intf_details)
return new_intf_info
@put_context_in_serverpool
def remove_router_interface(self, context, router_id, interface_info):
LOG.debug(_("NeutronRestProxyV2: remove_router_interface() called"))
# Validate args
router = self._get_router(context, router_id)
tenant_id = router['tenant_id']
# we will first get the interface identifier before deleting in the DB
if not interface_info:
msg = _("Either subnet_id or port_id must be specified")
raise exceptions.BadRequest(resource='router', msg=msg)
if 'port_id' in interface_info:
port = self._get_port(context, interface_info['port_id'])
interface_id = port['network_id']
elif 'subnet_id' in interface_info:
subnet = self._get_subnet(context, interface_info['subnet_id'])
interface_id = subnet['network_id']
else:
msg = _("Either subnet_id or port_id must be specified")
raise exceptions.BadRequest(resource='router', msg=msg)
with context.session.begin(subtransactions=True):
# remove router interface in DB
del_ret = super(NeutronRestProxyV2,
self).remove_router_interface(context,
router_id,
interface_info)
# remove the interface on the network controller
self.servers.rest_remove_router_interface(tenant_id, router_id,
interface_id)
return del_ret
@put_context_in_serverpool
def create_floatingip(self, context, floatingip):
LOG.debug(_("NeutronRestProxyV2: create_floatingip() called"))
with context.session.begin(subtransactions=True):
# create floatingip in DB
new_fl_ip = super(NeutronRestProxyV2,
self).create_floatingip(context, floatingip)
# create floatingip on the network controller
try:
if 'floatingip' in self.servers.get_capabilities():
self.servers.rest_create_floatingip(
new_fl_ip['tenant_id'], new_fl_ip)
else:
self._send_floatingip_update(context)
except servermanager.RemoteRestError as e:
with excutils.save_and_reraise_exception():
LOG.error(
_("NeutronRestProxyV2: Unable to create remote "
"floating IP: %s"), e)
# return created floating IP
return new_fl_ip
@put_context_in_serverpool
def update_floatingip(self, context, id, floatingip):
LOG.debug(_("NeutronRestProxyV2: update_floatingip() called"))
with context.session.begin(subtransactions=True):
# update floatingip in DB
new_fl_ip = super(NeutronRestProxyV2,
self).update_floatingip(context, id, floatingip)
# update network on network controller
if 'floatingip' in self.servers.get_capabilities():
self.servers.rest_update_floatingip(new_fl_ip['tenant_id'],
new_fl_ip, id)
else:
self._send_floatingip_update(context)
return new_fl_ip
@put_context_in_serverpool
def delete_floatingip(self, context, id):
LOG.debug(_("NeutronRestProxyV2: delete_floatingip() called"))
with context.session.begin(subtransactions=True):
# delete floating IP in DB
old_fip = super(NeutronRestProxyV2, self).get_floatingip(context,
id)
super(NeutronRestProxyV2, self).delete_floatingip(context, id)
# update network on network controller
if 'floatingip' in self.servers.get_capabilities():
self.servers.rest_delete_floatingip(old_fip['tenant_id'], id)
else:
self._send_floatingip_update(context)
@put_context_in_serverpool
def disassociate_floatingips(self, context, port_id, do_notify=True):
LOG.debug(_("NeutronRestProxyV2: diassociate_floatingips() called"))
router_ids = super(NeutronRestProxyV2, self).disassociate_floatingips(
context, port_id, do_notify=do_notify)
self._send_floatingip_update(context)
return router_ids
# overriding method from l3_db as original method calls
# self.delete_floatingip() which in turn calls self.delete_port() which
# is locked with 'bsn-port-barrier'
@put_context_in_serverpool
def delete_disassociated_floatingips(self, context, network_id):
query = self._model_query(context, l3_db.FloatingIP)
query = query.filter_by(floating_network_id=network_id,
fixed_port_id=None,
router_id=None)
for fip in query:
context.session.delete(fip)
self._delete_port(context.elevated(), fip['floating_port_id'])
def _send_floatingip_update(self, context):
try:
ext_net_id = self.get_external_network_id(context)
if ext_net_id:
# Use the elevated state of the context for the ext_net query
admin_context = context.elevated()
ext_net = super(NeutronRestProxyV2,
self).get_network(admin_context, ext_net_id)
# update external network on network controller
self._send_update_network(ext_net, admin_context)
except exceptions.TooManyExternalNetworks:
# get_external_network can raise errors when multiple external
# networks are detected, which isn't supported by the Plugin
LOG.error(_("NeutronRestProxyV2: too many external networks"))
def _add_host_route(self, context, destination, port):
subnet = {}
for fixed_ip in port['fixed_ips']:
subnet_id = fixed_ip['subnet_id']
nexthop = fixed_ip['ip_address']
subnet['host_routes'] = [{'destination': destination,
'nexthop': nexthop}]
updated_subnet = self.update_subnet(context,
subnet_id,
{'subnet': subnet})
payload = {'subnet': updated_subnet}
self._dhcp_agent_notifier.notify(context, payload,
'subnet.update.end')
LOG.debug(_("Adding host route: "))
LOG.debug(_("Destination:%(dst)s nexthop:%(next)s"),
{'dst': destination, 'next': nexthop})<|fim▁end|> | def _map_state_and_status(self, resource):
resource = copy.copy(resource)
|
<|file_name|>power_digit_sum.py<|end_file_name|><|fim▁begin|>def power_digit_sum(exponent):<|fim▁hole|><|fim▁end|> | power_of_2 = str(2 ** exponent)
return sum([int(x) for x in power_of_2]) |
<|file_name|>interface.py<|end_file_name|><|fim▁begin|>"""
Easy-to-use UMFPACK interface
=============================
.. currentmodule:: scikits.umfpack
The following functions can be used for LU decompositions and solving
equation systems:
.. autosummary::
:toctree: reference/
spsolve
splu
UmfpackLU
"""
from __future__ import division, print_function, absolute_import
from warnings import warn
import sys
import numpy as np
from numpy import asarray
from scipy.sparse import (isspmatrix_csc, isspmatrix_csr, isspmatrix,
SparseEfficiencyWarning, csc_matrix, hstack)
from .umfpack import UmfpackContext, UMFPACK_A
_families = {
(np.float64, np.int32): 'di',
(np.complex128, np.int32): 'zi',
(np.float64, np.int64): 'dl',
(np.complex128, np.int64): 'zl'
}
__all__ = ['spsolve', 'splu', 'UmfpackLU']
if sys.version_info[0] >= 3:
xrange = range
def spsolve(A, b):
"""Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
Parameters
----------
A : ndarray or sparse matrix
The square matrix A will be converted into CSC or CSR form
b : ndarray or sparse matrix
The matrix or vector representing the right hand side of the equation.
Returns
-------
x : ndarray or sparse matrix
the solution of the sparse linear equation.
If b is a vector, then x is a vector of size A.shape[0]
If b is a matrix, then x is a matrix of size (A.shape[0],)+b.shape[1:]
"""
x = UmfpackLU(A).solve(b)
if b.ndim == 2 and b.shape[1] == 1:
# compatibility with scipy.sparse.spsolve quirk
return x.ravel()
else:
return x
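# Illustrative usage sketch (not part of the original module); assumes a
# square scipy.sparse matrix A and a dense right-hand side vector b:
#
# from scikits.umfpack import spsolve
# x = spsolve(A, b)  # factorizes A with UMFPACK and solves A x = b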
def splu(A):
"""
Compute the LU decomposition of a sparse, square matrix.
Parameters
----------
A : sparse matrix
Sparse matrix to factorize. Should be in CSR or CSC format.
Returns
-------
invA : scikits.umfpack.UmfpackLU
Object, which has a ``solve`` method.
Notes
-----
This function uses the UMFPACK library.
"""
return UmfpackLU(A)
class UmfpackLU(object):
"""
LU factorization of a sparse matrix.
Factorization is represented as::
Pr * (R^-1) * A * Pc = L * U
Parameters
----------
A : csc_matrix or csr_matrix
Matrix to decompose
Attributes
----------
shape
nnz
perm_c
perm_r
L
U
R
Methods
-------
solve
solve_sparse
Examples
--------
The LU decomposition can be used to solve matrix equations. Consider:
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> from scikits import umfpack
>>> A = csc_matrix([[1,2,0,4],[1,0,0,1],[1,0,2,1],[2,2,1,0.]])
This can be solved for a given right-hand side:
>>> lu = umfpack.splu(A)
>>> b = np.array([1, 2, 3, 4])
>>> x = lu.solve(b)
>>> A.dot(x)
array([ 1., 2., 3., 4.])
The ``lu`` object also contains an explicit representation of the
decomposition. The permutations are represented as mappings of
indices:
>>> lu.perm_r
array([0, 2, 1, 3], dtype=int32)
>>> lu.perm_c
array([2, 0, 1, 3], dtype=int32)
The L and U factors are sparse matrices in CSC format:
>>> lu.L.A
array([[ 1. , 0. , 0. , 0. ],
[ 0. , 1. , 0. , 0. ],
[ 0. , 0. , 1. , 0. ],
[ 1. , 0.5, 0.5, 1. ]])
>>> lu.U.A
array([[ 2., 0., 1., 4.],
[ 0., 2., 1., 1.],
[ 0., 0., 1., 1.],
[ 0., 0., 0., -5.]])
The permutation matrices can be constructed:
>>> Pr = csc_matrix((4, 4))
>>> Pr[lu.perm_r, np.arange(4)] = 1
>>> Pc = csc_matrix((4, 4))
>>> Pc[np.arange(4), lu.perm_c] = 1
Similarly for the row scalings:
>>> R = csc_matrix((4, 4))
>>> R.setdiag(lu.R)
We can reassemble the original matrix:
>>> (Pr.T * R * (lu.L * lu.U) * Pc.T).A
array([[ 1., 2., 0., 4.],
[ 1., 0., 0., 1.],
[ 1., 0., 2., 1.],
[ 2., 2., 1., 0.]])
"""
def __init__(self, A):
if not (isspmatrix_csc(A) or isspmatrix_csr(A)):
A = csc_matrix(A)
warn('spsolve requires A be CSC or CSR matrix format',
SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype() # upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("matrix must be square (has shape %s)" % ((M, N),))
f_type = np.sctypeDict[A.dtype.name]
i_type = np.sctypeDict[A.indices.dtype.name]
try:
family = _families[(f_type, i_type)]
except KeyError:
msg = 'only float64 or complex128 matrices with int32 or int64' \
' indices are supported! (got: matrix: %s, indices: %s)' \
% (f_type, i_type)
raise ValueError(msg)
self.umf = UmfpackContext(family)
self.umf.numeric(A)
self._A = A
self._L = None
self._U = None
self._P = None
self._Q = None
self._R = None
def solve(self, b):
"""
Solve linear equation A x = b for x
Parameters
----------
b : ndarray
Right-hand side of the matrix equation. Can be vector or a matrix.
Returns
-------
x : ndarray
Solution to the matrix equation
"""<|fim▁hole|> raise ValueError("Shape of b is not compatible with that of A")
b_arr = asarray(b, dtype=self._A.dtype).reshape(b.shape[0], -1)
x = np.zeros((self._A.shape[0], b_arr.shape[1]), dtype=self._A.dtype)
for j in range(b_arr.shape[1]):
x[:,j] = self.umf.solve(UMFPACK_A, self._A, b_arr[:,j], autoTranspose=True)
return x.reshape((self._A.shape[0],) + b.shape[1:])
def solve_sparse(self, B):
"""
Solve linear equation of the form A X = B. Where B and X are sparse matrices.
Parameters
----------
B : any scipy.sparse matrix
Right-hand side of the matrix equation.
Note: it will be converted to csc_matrix via `.tocsc()`.
Returns
-------
X : csc_matrix
Solution to the matrix equation as a csc_matrix
"""
B = B.tocsc()
cols = list()
for j in xrange(B.shape[1]):
col = self.solve(B[:,j])
cols.append(csc_matrix(col))
return hstack(cols)
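# Illustrative usage sketch (not part of the original module): solve_sparse
# accepts a sparse right-hand side, e.g. assuming A and B are scipy.sparse
# matrices with compatible shapes:
#
# lu = UmfpackLU(A)        # factorize A once
# X = lu.solve_sparse(B)   # csc_matrix with one solution column per column of B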
def _compute_lu(self):
if self._L is None:
self._L, self._U, self._P, self._Q, self._R, do_recip = self.umf.lu(self._A)
if do_recip:
with np.errstate(divide='ignore'):
np.reciprocal(self._R, out=self._R)
# Conform to scipy.sparse.splu convention on permutation matrices
self._P = self._P[self._P]
@property
def shape(self):
"""
Shape of the original matrix as a tuple of ints.
"""
return self._A.shape
@property
def L(self):
"""
Lower triangular factor with unit diagonal as a
`scipy.sparse.csc_matrix`.
"""
self._compute_lu()
return self._L
@property
def U(self):
"""
Upper triangular factor as a `scipy.sparse.csc_matrix`.
"""
self._compute_lu()
return self._U
@property
def R(self):
"""
Row scaling factors, as a 1D array.
"""
self._compute_lu()
return self._R
@property
def perm_c(self):
"""
Permutation Pc represented as an array of indices.
The column permutation matrix can be reconstructed via:
>>> Pc = np.zeros((n, n))
>>> Pc[np.arange(n), perm_c] = 1
"""
self._compute_lu()
return self._Q
@property
def perm_r(self):
"""
Permutation Pr represented as an array of indices.
The row permutation matrix can be reconstructed via:
>>> Pr = np.zeros((n, n))
>>> Pr[perm_r, np.arange(n)] = 1
"""
self._compute_lu()
return self._P
@property
def nnz(self):
"""
Combined number of nonzeros in L and U: L.nnz + U.nnz
"""
return self._L.nnz + self._U.nnz<|fim▁end|> | if isspmatrix(b):
b = b.toarray()
if b.shape[0] != self._A.shape[1]: |
<|file_name|>gu.js<|end_file_name|><|fim▁begin|>/**
* @license Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
* For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/
CKEDITOR.plugins.setLang( 'a11yhelp', 'gu', {
title: 'એક્ક્ષેબિલિટી ની વિગતો',
contents: 'હેલ્પ. આ બંધ કરવા ESC દબાવો.',
legend: [
{
name: 'જનરલ',
items: [
{
name: 'એડિટર ટૂલબાર',
legend: 'Press ${toolbarFocus} to navigate to the toolbar. Move to the next and previous toolbar group with TAB and SHIFT+TAB. Move to the next and previous toolbar button with RIGHT ARROW or LEFT ARROW. Press SPACE or ENTER to activate the toolbar button.' // MISSING
},
{
name: 'એડિટર ડાયલોગ',
legend:
'Inside a dialog, press TAB to navigate to the next dialog element, press SHIFT+TAB to move to the previous dialog element, press ENTER to submit the dialog, press ESC to cancel the dialog. When a dialog has multiple tabs, the tab list can be reached either with ALT+F10 or with TAB as part of the dialog tabbing order. With tab list focused, move to the next and previous tab with RIGHT and LEFT ARROW, respectively.' // MISSING
},
{
name: 'Editor Context Menu', // MISSING
legend: 'Press ${contextMenu} or APPLICATION KEY to open context-menu. Then move to next menu option with TAB or DOWN ARROW. Move to previous option with SHIFT+TAB or UP ARROW. Press SPACE or ENTER to select the menu option. Open sub-menu of current option with SPACE or ENTER or RIGHT ARROW. Go back to parent menu item with ESC or LEFT ARROW. Close context menu with ESC.' // MISSING
},
{
name: 'Editor List Box', // MISSING
legend: 'Inside a list-box, move to next list item with TAB OR DOWN ARROW. Move to previous list item with SHIFT+TAB or UP ARROW. Press SPACE or ENTER to select the list option. Press ESC to close the list-box.' // MISSING
},
{
name: 'Editor Element Path Bar', // MISSING
legend: 'Press ${elementsPathFocus} to navigate to the elements path bar. Move to next element button with TAB or RIGHT ARROW. Move to previous button with SHIFT+TAB or LEFT ARROW. Press SPACE or ENTER to select the element in editor.' // MISSING
}
]
},
{
name: 'કમાંડસ',
items: [
{
name: 'અન્ડું કમાંડ',
legend: '$ દબાવો {undo}'
},
{
name: 'ફરી કરો કમાંડ',
legend: '$ દબાવો {redo}'
},
{
name: 'બોલ્દનો કમાંડ',
legend: '$ દબાવો {bold}'
},
{
name: ' Italic command', // MISSING
legend: 'Press ${italic}' // MISSING
},
{
name: ' Underline command', // MISSING
legend: 'Press ${underline}' // MISSING
},
{
name: ' Link command', // MISSING
legend: 'Press ${link}' // MISSING
},
{
name: ' Toolbar Collapse command', // MISSING
legend: 'Press ${toolbarCollapse}' // MISSING
},
{
name: ' Access previous focus space command', // MISSING
legend: 'Press ${accessPreviousSpace} to access the closest unreachable focus space before the caret, for example: two adjacent HR elements. Repeat the key combination to reach distant focus spaces.' // MISSING
},
{
name: ' Access next focus space command', // MISSING
legend: 'Press ${accessNextSpace} to access the closest unreachable focus space after the caret, for example: two adjacent HR elements. Repeat the key combination to reach distant focus spaces.' // MISSING
},
{
name: ' Accessibility Help', // MISSING
legend: 'Press ${a11yHelp}' // MISSING
},
{
name: ' Paste as plain text', // MISSING
legend: 'Press ${pastetext}', // MISSING
legendEdge: 'Press ${pastetext}, followed by ${paste}' // MISSING
}
]
}
],
tab: 'Tab', // MISSING
pause: 'Pause', // MISSING
capslock: 'Caps Lock', // MISSING
escape: 'Escape', // MISSING
pageUp: 'Page Up', // MISSING
pageDown: 'Page Down', // MISSING
leftArrow: 'Left Arrow', // MISSING
upArrow: 'Up Arrow', // MISSING
rightArrow: 'Right Arrow', // MISSING
downArrow: 'Down Arrow', // MISSING
insert: 'Insert', // MISSING
leftWindowKey: 'Left Windows key', // MISSING
rightWindowKey: 'Right Windows key', // MISSING
selectKey: 'Select key', // MISSING
numpad0: 'Numpad 0', // MISSING
numpad1: 'Numpad 1', // MISSING
numpad2: 'Numpad 2', // MISSING
numpad3: 'Numpad 3', // MISSING
numpad4: 'Numpad 4', // MISSING
numpad5: 'Numpad 5', // MISSING<|fim▁hole|> numpad8: 'Numpad 8', // MISSING
numpad9: 'Numpad 9', // MISSING
multiply: 'Multiply', // MISSING
add: 'Add', // MISSING
subtract: 'Subtract', // MISSING
decimalPoint: 'Decimal Point', // MISSING
divide: 'Divide', // MISSING
f1: 'F1', // MISSING
f2: 'F2', // MISSING
f3: 'F3', // MISSING
f4: 'F4', // MISSING
f5: 'F5', // MISSING
f6: 'F6', // MISSING
f7: 'F7', // MISSING
f8: 'F8', // MISSING
f9: 'F9', // MISSING
f10: 'F10', // MISSING
f11: 'F11', // MISSING
f12: 'F12', // MISSING
numLock: 'Num Lock', // MISSING
scrollLock: 'Scroll Lock', // MISSING
semiColon: 'Semicolon', // MISSING
equalSign: 'Equal Sign', // MISSING
comma: 'Comma', // MISSING
dash: 'Dash', // MISSING
period: 'Period', // MISSING
forwardSlash: 'Forward Slash', // MISSING
graveAccent: 'Grave Accent', // MISSING
openBracket: 'Open Bracket', // MISSING
backSlash: 'Backslash', // MISSING
closeBracket: 'Close Bracket', // MISSING
singleQuote: 'Single Quote' // MISSING
} );<|fim▁end|> | numpad6: 'Numpad 6', // MISSING
numpad7: 'Numpad 7', // MISSING |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
<|fim▁hole|> author="Logan Raarup",
author_email="[email protected]",
description="Amazon AWS API Gateway WSGI wrapper",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/logandk/serverless-wsgi",
py_modules=["serverless_wsgi"],
install_requires=["werkzeug>2"],
classifiers=(
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
keywords="wsgi serverless aws lambda api gateway apigw flask django pyramid",
)<|fim▁end|> | setuptools.setup(
name="serverless-wsgi",
version="3.0.0",
python_requires=">3.6", |
<|file_name|>Ekhem.cpp<|end_file_name|><|fim▁begin|>#include <iostream>
#include <string>
#include <tuple>
std::tuple<int,int> wczytaj_liczby();
int main ()
{
std::string opcja;
do {
int a,b;
std::cout << "wybierz opcje przeliczania" << std::endl;
std::cout << "dodawanie, odejmowanie, mnozenie czy dzielenie?" << std::endl;
std::cin >> opcja;
if (opcja=="dodawanie"){
std::tie(a,b)=wczytaj_liczby();
std::cout << "wynik dodawania " << a+b << std::endl;
}
else if (opcja=="odejmowanie"){
std::tie(a,b)=wczytaj_liczby();
std::cout << "wynik odejmowania " << a-b << std::endl;
}
else if (opcja=="mnozenie"){
std::tie(a,b)=wczytaj_liczby();
std::cout << "wynik mnozenia " << a*b << std::endl;
}
else if (opcja=="dzielenie"){
std::tie(a,b)=wczytaj_liczby();
std::cout << "wynik dzielenia " << a/b << std::endl;
<|fim▁hole|>
}
std::tuple<int,int> wczytaj_liczby() {
int a,b;
std::cout << "podaj pierwsza liczbe" << std::endl;
std::cin >> a;
std::cout << "podaj druga liczbe" << std::endl;
std::cin >> b;
return std::make_tuple(a,b);
}<|fim▁end|> | }
else std::cout << "nieznana opcja" << std::endl;
} while(opcja!="koniec"); |
<|file_name|>test_document_missing_changesets.py<|end_file_name|><|fim▁begin|># MajorMajor - Collaborative Document Editing Library
# Copyright (C) 2013 Ritchie Wilson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from majormajor.document import Document
from majormajor.ops.op import Op
from majormajor.changeset import Changeset
class TestDocumentMissingChangesets:
def test_missing_changesets(self):
doc = Document(snapshot='')
doc.HAS_EVENT_LOOP = False
assert doc.missing_changesets == set([])
assert doc.pending_new_changesets == []
root = doc.get_root_changeset()
A = Changeset(doc.get_id(), "dummyuser", [root])
doc.receive_changeset(A)
assert doc.missing_changesets == set([])
assert doc.pending_new_changesets == []
<|fim▁hole|> # Just one Changeset gets put in pending list
B = Changeset(doc.get_id(), "user1", ["C"])
B.set_id("B")
doc.receive_changeset(B)
assert doc.get_ordered_changesets() == [root, A]
assert doc.missing_changesets == set(["C"])
assert doc.pending_new_changesets == [B]
C = Changeset(doc.get_id(), "user1", [A])
C.set_id("C")
doc.receive_changeset(C)
assert doc.missing_changesets == set([])
assert doc.pending_new_changesets == []
assert B.get_parents() == [C]
assert doc.get_ordered_changesets() == [root, A, C, B]
# Now a string of changesets put on pending list
D = Changeset(doc.get_id(), "user1", ["G"])
D.set_id("D")
doc.receive_changeset(D)
assert doc.missing_changesets == set(["G"])
assert doc.pending_new_changesets == [D]
assert doc.get_ordered_changesets() == [root, A, C, B]
E = Changeset(doc.get_id(), "user1", ["D"])
E.set_id("E")
doc.receive_changeset(E)
assert E.get_parents() == [D]
assert doc.missing_changesets == set(["G"])
assert doc.pending_new_changesets == [D, E]
assert doc.get_ordered_changesets() == [root, A, C, B]
F = Changeset(doc.get_id(), "user1", ["E"])
F.set_id("F")
doc.receive_changeset(F)
assert doc.missing_changesets ==set( ["G"])
assert doc.pending_new_changesets == [D, E, F]
assert doc.get_ordered_changesets() == [root, A, C, B]
G = Changeset(doc.get_id(), "user1", ["C"])
G.set_id("G")
doc.receive_changeset(G)
assert doc.missing_changesets == set([])
assert doc.pending_new_changesets == []
assert doc.get_ordered_changesets() == [root, A, C, B, G, D, E, F]
assert doc.get_ordered_changesets() == doc.tree_to_list()<|fim▁end|> | |
<|file_name|>u16.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Operations and constants for unsigned 16-bits integers (`u16` type)<|fim▁hole|>uint_module! { u16, i16, 16 }<|fim▁end|> |
#![stable]
#![doc(primitive = "u16")]
|
<|file_name|>journal.py<|end_file_name|><|fim▁begin|>from django.contrib import admin
from django.contrib.admin.widgets import AdminIntegerFieldWidget
from django.core.validators import MaxValueValidator, MinValueValidator
from modeltranslation.admin import TranslationAdmin
from django.urls import reverse
from django.utils import timezone as tz<|fim▁hole|>from django.utils.html import format_html
from django.utils.translation import gettext as _
from django import forms
from reversion.admin import VersionAdmin
from ..models import Issue
from ..models import Journal
from ..models import JournalInformation
from ..models import JournalType
from ..models import Language
from ..models import Discipline
JOURNAL_INFORMATION_COMPARE_EXCLUDE = [
# Exclude the translated base fields (ie. about) because the translation fields (ie. about_fr)
# are already displayed.
"about",
"contact",
"editorial_policy",
"instruction_for_authors",
"partners",
"publishing_ethics",
"subscriptions",
"team",
# Exclude the auto_now date field.
"updated",
# Exclude ID fields.
"id",
"journal_id",
]
class JournalDisciplineInline(admin.TabularInline):
model = Journal.disciplines.through
def get_field_queryset(self, db, db_field, request):
if db_field.name == "discipline":
# Filter the discipline field's queryset based on the parent journal's type.
if request._obj:
return db_field.remote_field.model._default_manager.using(db).filter(
type__code=request._obj.type.code
)
# If there is no parent journal (during journal creation), return an empty queryset.
else:
return db_field.remote_field.model._default_manager.using(db).none()
return super().get_field_queryset(db, db_field, request)
class JournalForm(forms.ModelForm):
fields = "all"
model = Journal
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Limit `year_of_addition` field values to the current year and the next two years.
now = tz.now()
min_year = now.year
max_year = min_year + 2
self.fields["year_of_addition"].validators = [
MinValueValidator(min_year),
MaxValueValidator(max_year),
]
self.fields["year_of_addition"].widget = AdminIntegerFieldWidget(
attrs={
"min": min_year,
"max": max_year,
},
)
def clean(self):
# In Django < 2.0, CharField stores empty values as empty strings, causing
# a unicity constraint error when multiple objects have an empty value for
# the same field. When we upgrade to Django 2.0, it will not be necessary
# to convert empty strings to None values.
if self.cleaned_data["localidentifier"] == "":
self.cleaned_data["localidentifier"] = None
return self.cleaned_data
class JournalAdmin(admin.ModelAdmin):
form = JournalForm
search_fields = (
"code",
"name",
"issn_print",
"issn_web",
"external_url",
)
list_display = (
"__str__",
"code",
"type",
"open_access",
"external_url",
"active",
)
list_display_links = (
"__str__",
"code",
)
list_filter = (
"collection",
"type",
"paper",
"open_access",
"active",
"is_new",
"year_of_addition",
)
filter_horizontal = ("members",)
fieldsets = [
(
"Identification",
{
"fields": (
(
"collection",
"type",
),
(
"code",
"localidentifier",
),
(
"name",
"subtitle",
),
("is_new", "year_of_addition"),
(
"previous_journal",
"next_journal",
),
(
"issn_print",
"issn_web",
),
("external_url", "redirect_to_external_url"),
),
},
),
(
None,
{
"fields": (
("open_access", "charges_apc", "paper"),
("first_publication_year", "last_publication_year"),
),
},
),
("Membres", {"fields": ("members",)}),
(
"État",
{
"classes": ("collapse",),
"fields": ("active",),
},
),
]
inlines = (JournalDisciplineInline,)
def get_form(self, request, obj=None, change=False, **kwargs):
# Save the journal object on the request to have access to it in `JournalDisciplineInline`.
request._obj = obj
return super().get_form(request, obj, change, **kwargs)
class IssueAdmin(admin.ModelAdmin):
list_display = (
"journal",
"year",
"volume",
"number",
"title",
"localidentifier",
"is_published",
"view_issue_on_site",
)
search_fields = (
"id",
"localidentifier",
)
list_filter = (
"is_published",
"journal__collection",
"journal__name",
)
actions = [
"make_published",
"make_unpublished",
"force_free_access_to_true",
"force_free_access_to_false",
]
def force_free_access_to_true(self, request, queryset):
"""Mark a set of issues as open access"""
queryset.update(force_free_access=True)
force_free_access_to_true.short_description = _(
"Contraindre les numéros sélectionnés en libre d'accès"
)
def force_free_access_to_false(self, request, queryset):
"""Mark a set of issues as not open access"""
queryset.update(force_free_access=False)
force_free_access_to_false.short_description = _(
"Ne pas contraindre ces numéros au libre accès"
)
def view_issue_on_site(self, obj):
""" Display the link leading to the issue on website """
url = reverse(
"public:journal:issue_detail",
kwargs={
"journal_code": obj.journal.code,
"issue_slug": obj.volume_slug,
"localidentifier": obj.localidentifier,
},
)
if not obj.is_published and obj.journal.collection.is_main_collection:
url = "{url}?ticket={ticket}".format(url=url, ticket=obj.prepublication_ticket)
return format_html("<a href={}>{}</a>", url, _("Voir sur le site"))
view_issue_on_site.short_description = _("Voir le numéro sur le site")
def get_readonly_fields(self, request, obj=None):
return self.readonly_fields + ("is_published",)
class JournalInformationAdminForm(forms.ModelForm):
class Meta:
model = JournalInformation
fields = "__all__"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Exclude French & English from other_languages field. These languages are set in the
# main_languages field.
self.fields["other_languages"].queryset = Language.objects.exclude(id__in=[1, 2])
class JournalInformationAdmin(VersionAdmin, TranslationAdmin):
form = JournalInformationAdminForm
class JournalTypeAdmin(TranslationAdmin):
pass
class DisciplineAdmin(TranslationAdmin):
def get_types(self, obj):
return ", ".join([t.name for t in obj.type.all()])
list_display = [
"name",
"get_types",
]
list_filter = [
"type",
]
admin.site.register(Journal, JournalAdmin)
admin.site.register(Issue, IssueAdmin)
admin.site.register(JournalInformation, JournalInformationAdmin)
admin.site.unregister(JournalType)
admin.site.register(JournalType, JournalTypeAdmin)
admin.site.register(Discipline, DisciplineAdmin)<|fim▁end|> | |
<|file_name|>SffIO.py<|end_file_name|><|fim▁begin|># Copyright 2009-2010 by Peter Cock. All rights reserved.
# Based on code contributed and copyright 2009 by Jose Blanca (COMAV-UPV).
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SeqIO support for the binary Standard Flowgram Format (SFF) file format.
SFF was designed by 454 Life Sciences (Roche), the Whitehead Institute for
Biomedical Research and the Wellcome Trust Sanger Institute. You are expected
to use this module via the Bio.SeqIO functions under the format name "sff" (or
"sff-trim" as described below).
For example, to iterate over the records in an SFF file,
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("Roche/E3MFGYR02_random_10_reads.sff", "sff"):
... print record.id, len(record), record.seq[:20]+"..."
E3MFGYR02JWQ7T 265 tcagGGTCTACATGTTGGTT...
E3MFGYR02JA6IL 271 tcagTTTTTTTTGGAAAGGA...
E3MFGYR02JHD4H 310 tcagAAAGACAAGTGGTATC...
E3MFGYR02GFKUC 299 tcagCGGCCGGGCCTCTCAT...
E3MFGYR02FTGED 281 tcagTGGTAATGGGGGGAAA...
E3MFGYR02FR9G7 261 tcagCTCCGTAAGAAGGTGC...
E3MFGYR02GAZMS 278 tcagAAAGAAGTAAGGTAAA...
E3MFGYR02HHZ8O 221 tcagACTTTCTTCTTTACCG...
E3MFGYR02GPGB1 269 tcagAAGCAGTGGTATCAAC...
E3MFGYR02F7Z7G 219 tcagAATCATCCACTTTTTA...
Each SeqRecord object will contain all the annotation from the SFF file,
including the PHRED quality scores.
>>> print record.id, len(record)
E3MFGYR02F7Z7G 219
>>> print record.seq[:10], "..."
tcagAATCAT ...
>>> print record.letter_annotations["phred_quality"][:10], "..."
[22, 21, 23, 28, 26, 15, 12, 21, 28, 21] ...
Notice that the sequence is given in mixed case, the central upper case region
corresponds to the trimmed sequence. This matches the output of the Roche
tools (and the 3rd party tool sff_extract) for SFF to FASTA.
>>> print record.annotations["clip_qual_left"]
4
>>> print record.annotations["clip_qual_right"]
134
>>> print record.seq[:4]
tcag
>>> print record.seq[4:20], "...", record.seq[120:134]
AATCATCCACTTTTTA ... CAAAACACAAACAG
>>> print record.seq[134:]
atcttatcaacaaaactcaaagttcctaactgagacacgcaacaggggataagacaaggcacacaggggataggnnnnnnnnnnn
The annotations dictionary also contains any adapter clip positions
(usually zero), and information about the flows. e.g.
>>> print record.annotations["flow_key"]
TCAG
>>> print record.annotations["flow_values"][:10], "..."
(83, 1, 128, 7, 4, 84, 6, 106, 3, 172) ...
>>> print len(record.annotations["flow_values"])
400
>>> print record.annotations["flow_index"][:10], "..."
(1, 2, 3, 2, 2, 0, 3, 2, 3, 3) ...
>>> print len(record.annotations["flow_index"])
219
As a convenience method, you can read the file with SeqIO format name "sff-trim"
instead of "sff" to get just the trimmed sequences (without any annotation
except for the PHRED quality scores):
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("Roche/E3MFGYR02_random_10_reads.sff", "sff-trim"):
... print record.id, len(record), record.seq[:20]+"..."
E3MFGYR02JWQ7T 260 GGTCTACATGTTGGTTAACC...
E3MFGYR02JA6IL 265 TTTTTTTTGGAAAGGAAAAC...
E3MFGYR02JHD4H 292 AAAGACAAGTGGTATCAACG...
E3MFGYR02GFKUC 295 CGGCCGGGCCTCTCATCGGT...
E3MFGYR02FTGED 277 TGGTAATGGGGGGAAATTTA...
E3MFGYR02FR9G7 256 CTCCGTAAGAAGGTGCTGCC...
E3MFGYR02GAZMS 271 AAAGAAGTAAGGTAAATAAC...
E3MFGYR02HHZ8O 150 ACTTTCTTCTTTACCGTAAC...
E3MFGYR02GPGB1 221 AAGCAGTGGTATCAACGCAG...
E3MFGYR02F7Z7G 130 AATCATCCACTTTTTAACGT...
Looking at the final record in more detail, note how this differs from the
example above:
<|fim▁hole|> AATCATCCAC ...
>>> print record.letter_annotations["phred_quality"][:10], "..."
[26, 15, 12, 21, 28, 21, 36, 28, 27, 27] ...
>>> print record.annotations
{}
You might use the Bio.SeqIO.convert() function to convert the (trimmed) SFF
reads into a FASTQ file (or a FASTA file and a QUAL file), e.g.
>>> from Bio import SeqIO
>>> from StringIO import StringIO
>>> out_handle = StringIO()
>>> count = SeqIO.convert("Roche/E3MFGYR02_random_10_reads.sff", "sff",
... out_handle, "fastq")
>>> print "Converted %i records" % count
Converted 10 records
The output FASTQ file would start like this:
>>> print "%s..." % out_handle.getvalue()[:50]
@E3MFGYR02JWQ7T
tcagGGTCTACATGTTGGTTAACCCGTACTGATT...
Bio.SeqIO.index() provides memory efficient random access to the reads in an
SFF file by name. SFF files can include an index within the file, which can
be read in making this very fast. If the index is missing (or in a format not
yet supported in Biopython) the file is indexed by scanning all the reads -
which is a little slower. For example,
>>> from Bio import SeqIO
>>> reads = SeqIO.index("Roche/E3MFGYR02_random_10_reads.sff", "sff")
>>> record = reads["E3MFGYR02JHD4H"]
>>> print record.id, len(record), record.seq[:20]+"..."
E3MFGYR02JHD4H 310 tcagAAAGACAAGTGGTATC...
Or, using the trimmed reads:
>>> from Bio import SeqIO
>>> reads = SeqIO.index("Roche/E3MFGYR02_random_10_reads.sff", "sff-trim")
>>> record = reads["E3MFGYR02JHD4H"]
>>> print record.id, len(record), record.seq[:20]+"..."
E3MFGYR02JHD4H 292 AAAGACAAGTGGTATCAACG...
You can also use the Bio.SeqIO.write() function with the "sff" format. Note
that this requires all the flow information etc, and thus is probably only
useful for SeqRecord objects originally from reading another SFF file (and
not the trimmed SeqRecord objects from parsing an SFF file as "sff-trim").
As an example, let's pretend this example SFF file represents some DNA which
was pre-amplified with a PCR primers AAAGANNNNN. The following script would
produce a sub-file containing all those reads whose post-quality clipping
region (i.e. the sequence after trimming) starts with AAAGA exactly (the non-
degenerate bit of this pretend primer):
>>> from Bio import SeqIO
>>> records = (record for record in
... SeqIO.parse("Roche/E3MFGYR02_random_10_reads.sff","sff")
... if record.seq[record.annotations["clip_qual_left"]:].startswith("AAAGA"))
>>> count = SeqIO.write(records, "temp_filtered.sff", "sff")
>>> print "Selected %i records" % count
Selected 2 records
Of course, for an assembly you would probably want to remove these primers.
If you want FASTA or FASTQ output, you could just slice the SeqRecord. However,
if you want SFF output we have to preserve all the flow information - the trick
is just to adjust the left clip position!
>>> from Bio import SeqIO
>>> def filter_and_trim(records, primer):
... for record in records:
... if record.seq[record.annotations["clip_qual_left"]:].startswith(primer):
... record.annotations["clip_qual_left"] += len(primer)
... yield record
>>> records = SeqIO.parse("Roche/E3MFGYR02_random_10_reads.sff", "sff")
>>> count = SeqIO.write(filter_and_trim(records,"AAAGA"),
... "temp_filtered.sff", "sff")
>>> print "Selected %i records" % count
Selected 2 records
We can check the results, note the lower case clipped region now includes the "AAAGA"
sequence:
>>> for record in SeqIO.parse("temp_filtered.sff", "sff"):
... print record.id, len(record), record.seq[:20]+"..."
E3MFGYR02JHD4H 310 tcagaaagaCAAGTGGTATC...
E3MFGYR02GAZMS 278 tcagaaagaAGTAAGGTAAA...
>>> for record in SeqIO.parse("temp_filtered.sff", "sff-trim"):
... print record.id, len(record), record.seq[:20]+"..."
E3MFGYR02JHD4H 287 CAAGTGGTATCAACGCAGAG...
E3MFGYR02GAZMS 266 AGTAAGGTAAATAACAAACG...
>>> import os
>>> os.remove("temp_filtered.sff")
For a description of the file format, please see the Roche manuals and:
http://www.ncbi.nlm.nih.gov/Traces/trace.cgi?cmd=show&f=formats&m=doc&s=formats
"""
from Bio.SeqIO.Interfaces import SequenceWriter
from Bio import Alphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import struct
import sys
from Bio._py3k import _bytes_to_string, _as_bytes
_null = _as_bytes("\0")
_sff = _as_bytes(".sff")
_hsh = _as_bytes(".hsh")
_srt = _as_bytes(".srt")
_mft = _as_bytes(".mft")
#This is a hack because char 255 is special in unicode:
try:
#This works on Python 2.6+ or Python 3.0
_flag = eval(r'b"\xff"')
except SyntaxError:
#Must be on Python 2.4 or 2.5
_flag = "\xff" #Char 255
def _sff_file_header(handle):
"""Read in an SFF file header (PRIVATE).
Assumes the handle is at the start of the file, will read forwards
through the header and leave the handle pointing at the first record.
Returns a tuple of values from the header (header_length, index_offset,
index_length, number_of_reads, flows_per_read, flow_chars, key_sequence)
>>> handle = open("Roche/greek.sff", "rb")
>>> values = _sff_file_header(handle)
>>> print values[0]
840
>>> print values[1]
65040
>>> print values[2]
256
>>> print values[3]
24
>>> print values[4]
800
>>> values[-1]
'TCAG'
"""
if hasattr(handle,"mode") and "U" in handle.mode.upper():
raise ValueError("SFF files must NOT be opened in universal new "
"lines mode. Binary mode is recommended (although "
"on Unix the default mode is also fine).")
elif hasattr(handle,"mode") and "B" not in handle.mode.upper() \
and sys.platform == "win32":
raise ValueError("SFF files must be opened in binary mode on Windows")
#file header (part one)
#use big endian encoding >
#magic_number I
#version 4B
#index_offset Q
#index_length I
#number_of_reads I
#header_length H
#key_length H
#number_of_flows_per_read H
#flowgram_format_code B
#[rest of file header depends on the number of flows and how many keys]
fmt = '>4s4BQIIHHHB'
assert 31 == struct.calcsize(fmt)
data = handle.read(31)
if not data:
raise ValueError("Empty file.")
elif len(data) < 13:
raise ValueError("File too small to hold a valid SFF header.")
magic_number, ver0, ver1, ver2, ver3, index_offset, index_length, \
number_of_reads, header_length, key_length, number_of_flows_per_read, \
flowgram_format = struct.unpack(fmt, data)
if magic_number in [_hsh, _srt, _mft]:
#Probably user error, calling Bio.SeqIO.parse() twice!
raise ValueError("Handle seems to be at SFF index block, not start")
if magic_number != _sff: # 779314790
raise ValueError("SFF file did not start '.sff', but %s" \
% repr(magic_number))
if (ver0, ver1, ver2, ver3) != (0, 0, 0, 1):
raise ValueError("Unsupported SFF version in header, %i.%i.%i.%i" \
% (ver0, ver1, ver2, ver3))
if flowgram_format != 1:
raise ValueError("Flowgram format code %i not supported" \
% flowgram_format)
if (index_offset!=0) ^ (index_length!=0):
raise ValueError("Index offset %i but index length %i" \
% (index_offset, index_length))
flow_chars = _bytes_to_string(handle.read(number_of_flows_per_read))
key_sequence = _bytes_to_string(handle.read(key_length))
#According to the spec, the header_length field should be the total number
#of bytes required by this set of header fields, and should be equal to
#"31 + number_of_flows_per_read + key_length" rounded up to the next value
#divisible by 8.
assert header_length % 8 == 0
padding = header_length - number_of_flows_per_read - key_length - 31
assert 0 <= padding < 8, padding
if handle.read(padding).count(_null) != padding:
raise ValueError("Post header %i byte padding region contained data" \
% padding)
return header_length, index_offset, index_length, \
number_of_reads, number_of_flows_per_read, \
flow_chars, key_sequence
#This is a generator function!
def _sff_do_slow_index(handle):
"""Generates an index by scanning though all the reads in an SFF file (PRIVATE).
This is a slow but generic approach if we can't parse the provided index
(if present).
Will use the handle seek/tell functions.
"""
handle.seek(0)
header_length, index_offset, index_length, number_of_reads, \
number_of_flows_per_read, flow_chars, key_sequence \
= _sff_file_header(handle)
#Now on to the reads...
read_header_fmt = '>2HI4H'
read_header_size = struct.calcsize(read_header_fmt)
#NOTE - assuming flowgram_format==1, which means struct type H
read_flow_fmt = ">%iH" % number_of_flows_per_read
read_flow_size = struct.calcsize(read_flow_fmt)
assert 1 == struct.calcsize(">B")
assert 1 == struct.calcsize(">s")
assert 1 == struct.calcsize(">c")
assert read_header_size % 8 == 0 #Important for padding calc later!
for read in range(number_of_reads):
record_offset = handle.tell()
if record_offset == index_offset:
#Found index block within reads, ignore it:
offset = index_offset + index_length
if offset % 8:
offset += 8 - (offset % 8)
assert offset % 8 == 0
handle.seek(offset)
record_offset = offset
#assert record_offset%8 == 0 #Worth checking, but slow
#First the fixed header
data = handle.read(read_header_size)
read_header_length, name_length, seq_len, clip_qual_left, \
clip_qual_right, clip_adapter_left, clip_adapter_right \
= struct.unpack(read_header_fmt, data)
if read_header_length < 10 or read_header_length % 8 != 0:
raise ValueError("Malformed read header, says length is %i:\n%s" \
% (read_header_length, repr(data)))
#now the name and any padding (remainder of header)
name = _bytes_to_string(handle.read(name_length))
padding = read_header_length - read_header_size - name_length
if handle.read(padding).count(_null) != padding:
raise ValueError("Post name %i byte padding region contained data" \
% padding)
assert record_offset + read_header_length == handle.tell()
#now the flowgram values, flowgram index, bases and qualities
size = read_flow_size + 3*seq_len
handle.seek(size, 1)
#now any padding...
padding = size % 8
if padding:
padding = 8 - padding
if handle.read(padding).count(_null) != padding:
raise ValueError("Post quality %i byte padding region contained data" \
% padding)
#print read, name, record_offset
yield name, record_offset
if handle.tell() % 8 != 0:
raise ValueError("After scanning reads, did not end on a multiple of 8")
def _sff_find_roche_index(handle):
"""Locate any existing Roche style XML meta data and read index (PRIVATE).
Makes a number of hard coded assumptions based on reverse engineered SFF
files from Roche 454 machines.
Returns a tuple of read count, SFF "index" offset and size, XML offset
and size, and the actual read index offset and size.
Raises a ValueError for unsupported or non-Roche index blocks.
"""
handle.seek(0)
header_length, index_offset, index_length, number_of_reads, \
number_of_flows_per_read, flow_chars, key_sequence \
= _sff_file_header(handle)
assert handle.tell() == header_length
if not index_offset or not index_length:
raise ValueError("No index present in this SFF file")
#Now jump to the header...
handle.seek(index_offset)
fmt = ">4s4B"
fmt_size = struct.calcsize(fmt)
data = handle.read(fmt_size)
if not data:
raise ValueError("Premature end of file? Expected index of size %i at offest %i, found nothing" \
% (index_length, index_offset))
if len(data) < fmt_size:
raise ValueError("Premature end of file? Expected index of size %i at offest %i, found %s" \
% (index_length, index_offset, repr(data)))
magic_number, ver0, ver1, ver2, ver3 = struct.unpack(fmt, data)
if magic_number == _mft: # 778921588
#Roche 454 manifest index
#This is typical from raw Roche 454 SFF files (2009), and includes
#both an XML manifest and the sorted index.
if (ver0, ver1, ver2, ver3) != (49, 46, 48, 48):
#This is "1.00" as a string
raise ValueError("Unsupported version in .mft index header, %i.%i.%i.%i" \
% (ver0, ver1, ver2, ver3))
fmt2 = ">LL"
fmt2_size = struct.calcsize(fmt2)
xml_size, data_size = struct.unpack(fmt2, handle.read(fmt2_size))
if index_length != fmt_size + fmt2_size + xml_size + data_size:
raise ValueError("Problem understanding .mft index header, %i != %i + %i + %i + %i" \
% (index_length, fmt_size, fmt2_size, xml_size, data_size))
return number_of_reads, header_length, \
index_offset, index_length, \
index_offset + fmt_size + fmt2_size, xml_size, \
index_offset + fmt_size + fmt2_size + xml_size, data_size
elif magic_number == _srt: #779317876
#Roche 454 sorted index
#I've had this from Roche tool sfffile when the read identifiers
#had nonstandard lengths and there was no XML manifest.
if (ver0, ver1, ver2, ver3) != (49, 46, 48, 48):
#This is "1.00" as a string
raise ValueError("Unsupported version in .srt index header, %i.%i.%i.%i" \
% (ver0, ver1, ver2, ver3))
data = handle.read(4)
if data != _null*4:
raise ValueError("Did not find expected null four bytes in .srt index")
return number_of_reads, header_length, \
index_offset, index_length, \
0, 0, \
index_offset + fmt_size + 4, index_length - fmt_size - 4
elif magic_number == _hsh:
raise ValueError("Hash table style indexes (.hsh) in SFF files are "
"not (yet) supported")
else:
raise ValueError("Unknown magic number %s in SFF index header:\n%s" \
% (repr(magic_number), repr(data)))
def _sff_read_roche_index_xml(handle):
"""Reads any existing Roche style XML manifest data in the SFF "index" (PRIVATE, DEPRECATED).
Will use the handle seek/tell functions. Returns a string.
This has been replaced by ReadRocheXmlManifest. We would normally just
delete an old private function without warning, but I believe some people
are using this so we'll handle this with a deprecation warning.
"""
import warnings
warnings.warn("Private function _sff_read_roche_index_xml is deprecated. "
"Use new public function ReadRocheXmlManifest instead",
DeprecationWarning)
return ReadRocheXmlManifest(handle)
def ReadRocheXmlManifest(handle):
"""Reads any Roche style XML manifest data in the SFF "index".
The SFF file format allows for multiple different index blocks, and Roche
took advantage of this to define their own index block which also embeds
an XML manifest string. This is not a publicly documented extension to
the SFF file format, this was reverse engineered.
The handle should be to an SFF file opened in binary mode. This function
will use the handle seek/tell functions and leave the handle in an
arbitrary location.
Any XML manifest found is returned as a Python string, which you can then
parse as appropriate, or reuse when writing out SFF files with the
SffWriter class.
Returns a string, or raises a ValueError if a Roche manifest could not be
found.
"""
number_of_reads, header_length, index_offset, index_length, xml_offset, \
xml_size, read_index_offset, read_index_size = _sff_find_roche_index(handle)
if not xml_offset or not xml_size:
raise ValueError("No XML manifest found")
handle.seek(xml_offset)
return _bytes_to_string(handle.read(xml_size))
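#Illustrative usage sketch for ReadRocheXmlManifest (not part of the original
#module); "example.sff" is a hypothetical file containing a Roche .mft index:
# handle = open("example.sff", "rb")
# xml = ReadRocheXmlManifest(handle)
# handle.close()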
#This is a generator function!
def _sff_read_roche_index(handle):
"""Reads any existing Roche style read index provided in the SFF file (PRIVATE).
Will use the handle seek/tell functions.
This works on ".srt1.00" and ".mft1.00" style Roche SFF index blocks.
Roche SFF indices use base 255 not 256, meaning we see bytes in the
range 0 to 254 only. This appears to be so that byte 0xFF (character 255)
can be used as a marker character to separate entries (required if the
read name lengths vary).
Note that since only four bytes are used for the read offset, this is
limited to 255^4 bytes (nearly 4GB). If you try to use the Roche sfffile
tool to combine SFF files beyond this limit, they issue a warning and
omit the index (and manifest).
"""
number_of_reads, header_length, index_offset, index_length, xml_offset, \
xml_size, read_index_offset, read_index_size = _sff_find_roche_index(handle)
#Now parse the read index...
handle.seek(read_index_offset)
fmt = ">5B"
for read in range(number_of_reads):
#TODO - Be more aware of when the index should end?
data = handle.read(6)
while True:
more = handle.read(1)
if not more:
raise ValueError("Premature end of file!")
data += more
if more == _flag: break
assert data[-1:] == _flag, data[-1:]
name = _bytes_to_string(data[:-6])
off4, off3, off2, off1, off0 = struct.unpack(fmt, data[-6:-1])
offset = off0 + 255*off1 + 65025*off2 + 16581375*off3
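#Worked example (not from the original source): index bytes unpacking to
#(off4, off3, off2, off1, off0) = (0, 0, 1, 2, 5) give
#offset = 5 + 255*2 + 65025*1 = 65540.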
if off4:
#Could in theory be used as a fifth piece of offset information,
#i.e. offset =+ 4228250625L*off4, but testing the Roche tools this
#is not the case. They simply don't support such large indexes.
raise ValueError("Expected a null terminator to the read name.")
yield name, offset
if handle.tell() != read_index_offset + read_index_size:
raise ValueError("Problem with index length? %i vs %i" \
% (handle.tell(), read_index_offset + read_index_size))
def _sff_read_seq_record(handle, number_of_flows_per_read, flow_chars,
key_sequence, alphabet, trim=False):
"""Parse the next read in the file, return data as a SeqRecord (PRIVATE)."""
#Now on to the reads...
#the read header format (fixed part):
#read_header_length H
#name_length H
#seq_len I
#clip_qual_left H
#clip_qual_right H
#clip_adapter_left H
#clip_adapter_right H
#[rest of read header depends on the name length etc]
read_header_fmt = '>2HI4H'
read_header_size = struct.calcsize(read_header_fmt)
read_flow_fmt = ">%iH" % number_of_flows_per_read
read_flow_size = struct.calcsize(read_flow_fmt)
read_header_length, name_length, seq_len, clip_qual_left, \
clip_qual_right, clip_adapter_left, clip_adapter_right \
= struct.unpack(read_header_fmt, handle.read(read_header_size))
if clip_qual_left:
clip_qual_left -= 1 #python counting
if clip_adapter_left:
clip_adapter_left -= 1 #python counting
if read_header_length < 10 or read_header_length % 8 != 0:
raise ValueError("Malformed read header, says length is %i" \
% read_header_length)
#now the name and any padding (remainder of header)
name = _bytes_to_string(handle.read(name_length))
padding = read_header_length - read_header_size - name_length
if handle.read(padding).count(_null) != padding:
raise ValueError("Post name %i byte padding region contained data" \
% padding)
#now the flowgram values, flowgram index, bases and qualities
#NOTE - assuming flowgram_format==1, which means struct type H
flow_values = handle.read(read_flow_size) #unpack later if needed
temp_fmt = ">%iB" % seq_len # used for flow index and quals
flow_index = handle.read(seq_len) #unpack later if needed
seq = _bytes_to_string(handle.read(seq_len)) #TODO - Use bytes in Seq?
quals = list(struct.unpack(temp_fmt, handle.read(seq_len)))
#now any padding...
padding = (read_flow_size + seq_len*3)%8
if padding:
padding = 8 - padding
if handle.read(padding).count(_null) != padding:
raise ValueError("Post quality %i byte padding region contained data" \
% padding)
#Now build a SeqRecord
if trim:
seq = seq[clip_qual_left:clip_qual_right].upper()
quals = quals[clip_qual_left:clip_qual_right]
#Don't record the clipping values, flow etc, they make no sense now:
annotations = {}
else:
#This use of mixed case mimics the Roche SFF tool's FASTA output
seq = seq[:clip_qual_left].lower() + \
seq[clip_qual_left:clip_qual_right].upper() + \
seq[clip_qual_right:].lower()
annotations = {"flow_values":struct.unpack(read_flow_fmt, flow_values),
"flow_index":struct.unpack(temp_fmt, flow_index),
"flow_chars":flow_chars,
"flow_key":key_sequence,
"clip_qual_left":clip_qual_left,
"clip_qual_right":clip_qual_right,
"clip_adapter_left":clip_adapter_left,
"clip_adapter_right":clip_adapter_right}
record = SeqRecord(Seq(seq, alphabet),
id=name,
name=name,
description="",
annotations=annotations)
#Dirty trick to speed up this line:
#record.letter_annotations["phred_quality"] = quals
dict.__setitem__(record._per_letter_annotations,
"phred_quality", quals)
#TODO - adaptor clipping
#Return the record and then continue...
return record
#This is a generator function!
def SffIterator(handle, alphabet=Alphabet.generic_dna, trim=False):
"""Iterate over Standard Flowgram Format (SFF) reads (as SeqRecord objects).
handle - input file, an SFF file, e.g. from Roche 454 sequencing.
This must NOT be opened in universal read lines mode!
alphabet - optional alphabet, defaults to generic DNA.
trim - should the sequences be trimmed?
The resulting SeqRecord objects should match those from a paired FASTA
and QUAL file converted from the SFF file using the Roche 454 tool
ssfinfo. i.e. The sequence will be mixed case, with the trim regions
shown in lower case.
This function is used internally via the Bio.SeqIO functions:
>>> from Bio import SeqIO
>>> handle = open("Roche/E3MFGYR02_random_10_reads.sff", "rb")
>>> for record in SeqIO.parse(handle, "sff"):
... print record.id, len(record)
E3MFGYR02JWQ7T 265
E3MFGYR02JA6IL 271
E3MFGYR02JHD4H 310
E3MFGYR02GFKUC 299
E3MFGYR02FTGED 281
E3MFGYR02FR9G7 261
E3MFGYR02GAZMS 278
E3MFGYR02HHZ8O 221
E3MFGYR02GPGB1 269
E3MFGYR02F7Z7G 219
>>> handle.close()
You can also call it directly:
>>> handle = open("Roche/E3MFGYR02_random_10_reads.sff", "rb")
>>> for record in SffIterator(handle):
... print record.id, len(record)
E3MFGYR02JWQ7T 265
E3MFGYR02JA6IL 271
E3MFGYR02JHD4H 310
E3MFGYR02GFKUC 299
E3MFGYR02FTGED 281
E3MFGYR02FR9G7 261
E3MFGYR02GAZMS 278
E3MFGYR02HHZ8O 221
E3MFGYR02GPGB1 269
E3MFGYR02F7Z7G 219
>>> handle.close()
Or, with the trim option:
>>> handle = open("Roche/E3MFGYR02_random_10_reads.sff", "rb")
>>> for record in SffIterator(handle, trim=True):
... print record.id, len(record)
E3MFGYR02JWQ7T 260
E3MFGYR02JA6IL 265
E3MFGYR02JHD4H 292
E3MFGYR02GFKUC 295
E3MFGYR02FTGED 277
E3MFGYR02FR9G7 256
E3MFGYR02GAZMS 271
E3MFGYR02HHZ8O 150
E3MFGYR02GPGB1 221
E3MFGYR02F7Z7G 130
>>> handle.close()
"""
if isinstance(Alphabet._get_base_alphabet(alphabet),
Alphabet.ProteinAlphabet):
raise ValueError("Invalid alphabet, SFF files do not hold proteins.")
if isinstance(Alphabet._get_base_alphabet(alphabet),
Alphabet.RNAAlphabet):
raise ValueError("Invalid alphabet, SFF files do not hold RNA.")
header_length, index_offset, index_length, number_of_reads, \
number_of_flows_per_read, flow_chars, key_sequence \
= _sff_file_header(handle)
#Now on to the reads...
#the read header format (fixed part):
#read_header_length H
#name_length H
#seq_len I
#clip_qual_left H
#clip_qual_right H
#clip_adapter_left H
#clip_adapter_right H
#[rest of read header depends on the name length etc]
read_header_fmt = '>2HI4H'
read_header_size = struct.calcsize(read_header_fmt)
read_flow_fmt = ">%iH" % number_of_flows_per_read
read_flow_size = struct.calcsize(read_flow_fmt)
assert 1 == struct.calcsize(">B")
assert 1 == struct.calcsize(">s")
assert 1 == struct.calcsize(">c")
assert read_header_size % 8 == 0 #Important for padding calc later!
#The spec allows for the index block to be before or even in the middle
#of the reads. We can check that if we keep track of our position
#in the file...
for read in range(number_of_reads):
if index_offset and handle.tell() == index_offset:
offset = index_offset + index_length
if offset % 8:
offset += 8 - (offset % 8)
assert offset % 8 == 0
handle.seek(offset)
#Now that we've done this, we don't need to do it again. Clear
#the index_offset so we can skip extra handle.tell() calls:
index_offset = 0
yield _sff_read_seq_record(handle,
number_of_flows_per_read,
flow_chars,
key_sequence,
alphabet,
trim)
#The following is not essential, but avoids confusing error messages
#for the user if they try and re-parse the same handle.
if index_offset and handle.tell() == index_offset:
offset = index_offset + index_length
if offset % 8:
offset += 8 - (offset % 8)
assert offset % 8 == 0
handle.seek(offset)
#Should now be at the end of the file...
if handle.read(1):
raise ValueError("Additional data at end of SFF file")
#This is a generator function!
def _SffTrimIterator(handle, alphabet=Alphabet.generic_dna):
"""Iterate over SFF reads (as SeqRecord objects) with trimming (PRIVATE)."""
return SffIterator(handle, alphabet, trim=True)
class SffWriter(SequenceWriter):
"""SFF file writer."""
def __init__(self, handle, index=True, xml=None):
"""Creates the writer object.
handle - Output handle, ideally in binary write mode.
index - Boolean argument, should we try and write an index?
xml - Optional string argument, xml manifest to be recorded in the index
block (see function ReadRocheXmlManifest for reading this data).
"""
if hasattr(handle,"mode") and "U" in handle.mode.upper():
raise ValueError("SFF files must NOT be opened in universal new "
"lines mode. Binary mode is required")
elif hasattr(handle,"mode") and "B" not in handle.mode.upper():
raise ValueError("SFF files must be opened in binary mode")
self.handle = handle
self._xml = xml
if index:
self._index = []
else:
self._index = None
def write_file(self, records):
"""Use this to write an entire file containing the given records."""
try:
self._number_of_reads = len(records)
except TypeError:
self._number_of_reads = 0 #dummy value
if not hasattr(self.handle, "seek") \
or not hasattr(self.handle, "tell"):
raise ValueError("A handle with a seek/tell methods is "
"required in order to record the total "
"record count in the file header (once it "
"is known at the end).")
if self._index is not None and \
not (hasattr(self.handle, "seek") and hasattr(self.handle, "tell")):
import warnings
warnings.warn("A handle with a seek/tell methods is required in "
"order to record an SFF index.")
self._index = None
self._index_start = 0
self._index_length = 0
if not hasattr(records, "next"):
records = iter(records)
#Get the first record in order to find the flow information
#we will need for the header.
try:
record = records.next()
except StopIteration:
record = None
if record is None:
#No records -> empty SFF file (or an error)?
#We can't write a header without the flow information.
#return 0
raise ValueError("Need at least one record for SFF output")
try:
self._key_sequence = _as_bytes(record.annotations["flow_key"])
self._flow_chars = _as_bytes(record.annotations["flow_chars"])
self._number_of_flows_per_read = len(self._flow_chars)
except KeyError:
raise ValueError("Missing SFF flow information")
self.write_header()
self.write_record(record)
count = 1
for record in records:
self.write_record(record)
count += 1
if self._number_of_reads == 0:
#Must go back and record the record count...
offset = self.handle.tell()
self.handle.seek(0)
self._number_of_reads = count
self.write_header()
self.handle.seek(offset) #not essential?
else:
assert count == self._number_of_reads
if self._index is not None:
self._write_index()
return count
def _write_index(self):
assert len(self._index)==self._number_of_reads
handle = self.handle
self._index.sort()
self._index_start = handle.tell() #need for header
#XML...
if self._xml is not None:
xml = _as_bytes(self._xml)
else:
from Bio import __version__
xml = "<!-- This file was output with Biopython %s -->\n" % __version__
xml += "<!-- This XML and index block attempts to mimic Roche SFF files -->\n"
xml += "<!-- This file may be a combination of multiple SFF files etc -->\n"
xml = _as_bytes(xml)
xml_len = len(xml)
#Write to the file...
fmt = ">I4BLL"
fmt_size = struct.calcsize(fmt)
handle.write(_null*fmt_size + xml) #will come back later to fill this
fmt2 = ">6B"
assert 6 == struct.calcsize(fmt2)
self._index.sort()
index_len = 0 #don't know yet!
for name, offset in self._index:
#Roche files record the offsets using base 255 not 256.
#See comments for parsing the index block. There may be a faster
#way to code this, but we can't easily use shifts due to odd base
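#(illustrative example, not from the original code: offset 65798 would be
#stored as off3=0, off2=1, off1=3, off0=8, since 1*65025 + 3*255 + 8 = 65798)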
off3 = offset
off0 = off3 % 255
off3 -= off0
off1 = off3 % 65025
off3 -= off1
off2 = off3 % 16581375
off3 -= off2
assert offset == off0 + off1 + off2 + off3, \
"%i -> %i %i %i %i" % (offset, off0, off1, off2, off3)
off3, off2, off1, off0 = off3//16581375, off2//65025, \
off1//255, off0
assert off0 < 255 and off1 < 255 and off2 < 255 and off3 < 255, \
"%i -> %i %i %i %i" % (offset, off0, off1, off2, off3)
handle.write(name + struct.pack(fmt2, 0, \
off3, off2, off1, off0, 255))
index_len += len(name) + 6
#Note any padding is not included:
self._index_length = fmt_size + xml_len + index_len #need for header
#Pad out to an 8 byte boundary (although I have noticed some
#real Roche SFF files neglect to do this despite their manual
#suggesting this padding should be there):
if self._index_length % 8:
padding = 8 - (self._index_length%8)
handle.write(_null*padding)
else:
padding = 0
offset = handle.tell()
assert offset == self._index_start + self._index_length + padding, \
"%i vs %i + %i + %i" % (offset, self._index_start, \
self._index_length, padding)
#Must now go back and update the index header with index size...
handle.seek(self._index_start)
handle.write(struct.pack(fmt, 778921588, #magic number
49,46,48,48, #Roche index version, "1.00"
xml_len, index_len) + xml)
#Must now go back and update the header...
handle.seek(0)
self.write_header()
handle.seek(offset) #not essential?
def write_header(self):
#Do header...
key_length = len(self._key_sequence)
#file header (part one)
#use big endian encoding >
#magic_number I
#version 4B
#index_offset Q
#index_length I
#number_of_reads I
#header_length H
#key_length H
#number_of_flows_per_read H
#flowgram_format_code B
#[rest of file header depends on the number of flows and how many keys]
fmt = '>I4BQIIHHHB%is%is' % (self._number_of_flows_per_read, key_length)
#According to the spec, the header_length field should be the total
#number of bytes required by this set of header fields, and should be
#equal to "31 + number_of_flows_per_read + key_length" rounded up to
#the next value divisible by 8.
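#(as an illustration only: with 400 flows and a 4 base key this would be
#31 + 400 + 4 = 435, which rounds up to 440)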
if struct.calcsize(fmt) % 8 == 0:
padding = 0
else:
padding = 8 - (struct.calcsize(fmt) % 8)
header_length = struct.calcsize(fmt) + padding
assert header_length % 8 == 0
header = struct.pack(fmt, 779314790, #magic number 0x2E736666
0, 0, 0, 1, #version
self._index_start, self._index_length,
self._number_of_reads,
header_length, key_length,
self._number_of_flows_per_read,
1, #the only flowgram format code we support
self._flow_chars, self._key_sequence)
self.handle.write(header + _null*padding)
def write_record(self, record):
"""Write a single additional record to the output file.
This assumes the header has been done.
"""
#Basics
name = _as_bytes(record.id)
name_len = len(name)
seq = _as_bytes(str(record.seq).upper())
seq_len = len(seq)
#Qualities
try:
quals = record.letter_annotations["phred_quality"]
except KeyError:
raise ValueError("Missing PHRED qualities information")
#Flow
try:
flow_values = record.annotations["flow_values"]
flow_index = record.annotations["flow_index"]
if self._key_sequence != _as_bytes(record.annotations["flow_key"]) \
or self._flow_chars != _as_bytes(record.annotations["flow_chars"]):
raise ValueError("Records have inconsistent SFF flow data")
except KeyError:
raise ValueError("Missing SFF flow information")
except AttributeError:
raise ValueError("Header not written yet?")
#Clipping
try:
clip_qual_left = record.annotations["clip_qual_left"]
if clip_qual_left:
clip_qual_left += 1
clip_qual_right = record.annotations["clip_qual_right"]
clip_adapter_left = record.annotations["clip_adapter_left"]
if clip_adapter_left:
clip_adapter_left += 1
clip_adapter_right = record.annotations["clip_adapter_right"]
except KeyError:
raise ValueError("Missing SFF clipping information")
#Capture information for index
if self._index is not None:
offset = self.handle.tell()
#Check the position of the final record (before sort by name)
#See comments earlier about how base 255 seems to be used.
#This means the limit is 255**4 + 255**3 +255**2 + 255**1
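#(i.e. 4228250625 + 16581375 + 65025 + 255 = 4244897280)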
if offset > 4244897280:
import warnings
warnings.warn("Read %s has file offset %i, which is too large "
"to store in the Roche SFF index structure. No "
"index block will be recorded." % (name, offset))
#No point recording the offsets now
self._index = None
else:
self._index.append((name, self.handle.tell()))
#the read header format (fixed part):
#read_header_length H
#name_length H
#seq_len I
#clip_qual_left H
#clip_qual_right H
#clip_adapter_left H
#clip_adapter_right H
#[rest of read header depends on the name length etc]
#name
#flow values
#flow index
#sequence
#padding
read_header_fmt = '>2HI4H%is' % name_len
if struct.calcsize(read_header_fmt) % 8 == 0:
padding = 0
else:
padding = 8 - (struct.calcsize(read_header_fmt) % 8)
read_header_length = struct.calcsize(read_header_fmt) + padding
assert read_header_length % 8 == 0
data = struct.pack(read_header_fmt,
read_header_length,
name_len, seq_len,
clip_qual_left, clip_qual_right,
clip_adapter_left, clip_adapter_right,
name) + _null*padding
assert len(data) == read_header_length
#now the flowgram values, flowgram index, bases and qualities
#NOTE - assuming flowgram_format==1, which means struct type H
read_flow_fmt = ">%iH" % self._number_of_flows_per_read
read_flow_size = struct.calcsize(read_flow_fmt)
temp_fmt = ">%iB" % seq_len # used for flow index and quals
data += struct.pack(read_flow_fmt, *flow_values) \
+ struct.pack(temp_fmt, *flow_index) \
+ seq \
+ struct.pack(temp_fmt, *quals)
#now any final padding...
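#(seq_len*3 because the flow index, bases and qualities each take one byte per base)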
padding = (read_flow_size + seq_len*3)%8
if padding:
padding = 8 - padding
self.handle.write(data + _null*padding)
if __name__ == "__main__":
print "Running quick self test"
filename = "../../Tests/Roche/E3MFGYR02_random_10_reads.sff"
metadata = ReadRocheXmlManifest(open(filename, "rb"))
index1 = sorted(_sff_read_roche_index(open(filename, "rb")))
index2 = sorted(_sff_do_slow_index(open(filename, "rb")))
assert index1 == index2
assert len(index1) == len(list(SffIterator(open(filename, "rb"))))
from StringIO import StringIO
try:
#This is in Python 2.6+, and is essential on Python 3
from io import BytesIO
except ImportError:
BytesIO = StringIO
assert len(index1) == len(list(SffIterator(BytesIO(open(filename,"rb").read()))))
if sys.platform != "win32":
assert len(index1) == len(list(SffIterator(open(filename, "r"))))
index2 = sorted(_sff_read_roche_index(open(filename)))
assert index1 == index2
index2 = sorted(_sff_do_slow_index(open(filename)))
assert index1 == index2
assert len(index1) == len(list(SffIterator(open(filename))))
assert len(index1) == len(list(SffIterator(BytesIO(open(filename,"r").read()))))
assert len(index1) == len(list(SffIterator(BytesIO(open(filename).read()))))
sff = list(SffIterator(open(filename, "rb")))
sff2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_alt_index_at_end.sff", "rb")))
assert len(sff) == len(sff2)
for old, new in zip(sff, sff2):
assert old.id == new.id
assert str(old.seq) == str(new.seq)
sff2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_alt_index_at_start.sff", "rb")))
assert len(sff) == len(sff2)
for old, new in zip(sff, sff2):
assert old.id == new.id
assert str(old.seq) == str(new.seq)
sff2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_alt_index_in_middle.sff", "rb")))
assert len(sff) == len(sff2)
for old, new in zip(sff, sff2):
assert old.id == new.id
assert str(old.seq) == str(new.seq)
sff2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_index_at_start.sff", "rb")))
assert len(sff) == len(sff2)
for old, new in zip(sff, sff2):
assert old.id == new.id
assert str(old.seq) == str(new.seq)
sff2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_index_in_middle.sff", "rb")))
assert len(sff) == len(sff2)
for old, new in zip(sff, sff2):
assert old.id == new.id
assert str(old.seq) == str(new.seq)
sff_trim = list(SffIterator(open(filename, "rb"), trim=True))
print ReadRocheXmlManifest(open(filename, "rb"))
from Bio import SeqIO
filename = "../../Tests/Roche/E3MFGYR02_random_10_reads_no_trim.fasta"
fasta_no_trim = list(SeqIO.parse(open(filename,"rU"), "fasta"))
filename = "../../Tests/Roche/E3MFGYR02_random_10_reads_no_trim.qual"
qual_no_trim = list(SeqIO.parse(open(filename,"rU"), "qual"))
filename = "../../Tests/Roche/E3MFGYR02_random_10_reads.fasta"
fasta_trim = list(SeqIO.parse(open(filename,"rU"), "fasta"))
filename = "../../Tests/Roche/E3MFGYR02_random_10_reads.qual"
qual_trim = list(SeqIO.parse(open(filename,"rU"), "qual"))
for s, sT, f, q, fT, qT in zip(sff, sff_trim, fasta_no_trim,
qual_no_trim, fasta_trim, qual_trim):
#print
print s.id
#print s.seq
#print s.letter_annotations["phred_quality"]
assert s.id == f.id == q.id
assert str(s.seq) == str(f.seq)
assert s.letter_annotations["phred_quality"] == q.letter_annotations["phred_quality"]
assert s.id == sT.id == fT.id == qT.id
assert str(sT.seq) == str(fT.seq)
assert sT.letter_annotations["phred_quality"] == qT.letter_annotations["phred_quality"]
print "Writing with a list of SeqRecords..."
handle = StringIO()
w = SffWriter(handle, xml=metadata)
w.write_file(sff) #list
data = handle.getvalue()
print "And again with an iterator..."
handle = StringIO()
w = SffWriter(handle, xml=metadata)
w.write_file(iter(sff))
assert data == handle.getvalue()
#Check 100% identical to the original:
filename = "../../Tests/Roche/E3MFGYR02_random_10_reads.sff"
original = open(filename,"rb").read()
assert len(data) == len(original)
assert data == original
del data
handle.close()
print "-"*50
filename = "../../Tests/Roche/greek.sff"
for record in SffIterator(open(filename,"rb")):
print record.id
index1 = sorted(_sff_read_roche_index(open(filename, "rb")))
index2 = sorted(_sff_do_slow_index(open(filename, "rb")))
assert index1 == index2
try:
print ReadRocheXmlManifest(open(filename, "rb"))
assert False, "Should fail!"
except ValueError:
pass
handle = open(filename, "rb")
for record in SffIterator(handle):
pass
try:
for record in SffIterator(handle):
print record.id
assert False, "Should have failed"
except ValueError, err:
print "Checking what happens on re-reading a handle:"
print err
"""
#Ugly code to make test files...
index = ".diy1.00This is a fake index block (DIY = Do It Yourself), which is allowed under the SFF standard.\0"
padding = len(index)%8
if padding:
padding = 8 - padding
index += chr(0)*padding
assert len(index)%8 == 0
#Ugly bit of code to make a fake index at start
records = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_random_10_reads.sff", "rb")))
out_handle = open("../../Tests/Roche/E3MFGYR02_alt_index_at_start.sff", "w")
index = ".diy1.00This is a fake index block (DIY = Do It Yourself), which is allowed under the SFF standard.\0"
padding = len(index)%8
if padding:
padding = 8 - padding
index += chr(0)*padding
w = SffWriter(out_handle, index=False, xml=None)
#Fake the header...
w._number_of_reads = len(records)
w._index_start = 0
w._index_length = 0
w._key_sequence = records[0].annotations["flow_key"]
w._flow_chars = records[0].annotations["flow_chars"]
w._number_of_flows_per_read = len(w._flow_chars)
w.write_header()
w._index_start = out_handle.tell()
w._index_length = len(index)
out_handle.seek(0)
w.write_header() #this time with index info
w.handle.write(index)
for record in records:
w.write_record(record)
out_handle.close()
records2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_alt_index_at_start.sff", "rb")))
for old, new in zip(records, records2):
assert str(old.seq)==str(new.seq)
i = list(_sff_do_slow_index(open("../../Tests/Roche/E3MFGYR02_alt_index_at_start.sff", "rb")))
#Ugly bit of code to make a fake index in middle
records = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_random_10_reads.sff", "rb")))
out_handle = open("../../Tests/Roche/E3MFGYR02_alt_index_in_middle.sff", "w")
index = ".diy1.00This is a fake index block (DIY = Do It Yourself), which is allowed under the SFF standard.\0"
padding = len(index)%8
if padding:
padding = 8 - padding
index += chr(0)*padding
w = SffWriter(out_handle, index=False, xml=None)
#Fake the header...
w._number_of_reads = len(records)
w._index_start = 0
w._index_length = 0
w._key_sequence = records[0].annotations["flow_key"]
w._flow_chars = records[0].annotations["flow_chars"]
w._number_of_flows_per_read = len(w._flow_chars)
w.write_header()
for record in records[:5]:
w.write_record(record)
w._index_start = out_handle.tell()
w._index_length = len(index)
w.handle.write(index)
for record in records[5:]:
w.write_record(record)
out_handle.seek(0)
w.write_header() #this time with index info
out_handle.close()
records2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_alt_index_in_middle.sff", "rb")))
for old, new in zip(records, records2):
assert str(old.seq)==str(new.seq)
j = list(_sff_do_slow_index(open("../../Tests/Roche/E3MFGYR02_alt_index_in_middle.sff", "rb")))
#Ugly bit of code to make a fake index at end
records = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_random_10_reads.sff", "rb")))
out_handle = open("../../Tests/Roche/E3MFGYR02_alt_index_at_end.sff", "w")
w = SffWriter(out_handle, index=False, xml=None)
#Fake the header...
w._number_of_reads = len(records)
w._index_start = 0
w._index_length = 0
w._key_sequence = records[0].annotations["flow_key"]
w._flow_chars = records[0].annotations["flow_chars"]
w._number_of_flows_per_read = len(w._flow_chars)
w.write_header()
for record in records:
w.write_record(record)
w._index_start = out_handle.tell()
w._index_length = len(index)
out_handle.write(index)
out_handle.seek(0)
w.write_header() #this time with index info
out_handle.close()
records2 = list(SffIterator(open("../../Tests/Roche/E3MFGYR02_alt_index_at_end.sff", "rb")))
for old, new in zip(records, records2):
assert str(old.seq)==str(new.seq)
try:
print ReadRocheXmlManifest(open("../../Tests/Roche/E3MFGYR02_alt_index_at_end.sff", "rb"))
assert False, "Should fail!"
except ValueError:
pass
k = list(_sff_do_slow_index(open("../../Tests/Roche/E3MFGYR02_alt_index_at_end.sff", "rb")))
"""
print "Done"<|fim▁end|> | >>> print record.id, len(record)
E3MFGYR02F7Z7G 130
>>> print record.seq[:10], "..." |
<|file_name|>config-v18_test.go<|end_file_name|><|fim▁begin|>/*
* Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
"github.com/tidwall/gjson"
)
func TestServerConfig(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatalf("Init Test config failed")
}
// remove the root directory after the test ends.
defer removeAll(rootPath)
if serverConfig.GetRegion() != globalMinioDefaultRegion {
t.Errorf("Expecting region `us-east-1` found %s", serverConfig.GetRegion())
}
// Set new region and verify.
serverConfig.SetRegion("us-west-1")
if serverConfig.GetRegion() != "us-west-1" {
t.Errorf("Expecting region `us-west-1` found %s", serverConfig.GetRegion())
}
// Set new amqp notification id.
serverConfig.Notify.SetAMQPByID("2", amqpNotify{})
savedNotifyCfg1 := serverConfig.Notify.GetAMQPByID("2")
if !reflect.DeepEqual(savedNotifyCfg1, amqpNotify{}) {
t.Errorf("Expecting AMQP config %#v found %#v", amqpNotify{}, savedNotifyCfg1)
}
// Set new elastic search notification id.
serverConfig.Notify.SetElasticSearchByID("2", elasticSearchNotify{})
savedNotifyCfg2 := serverConfig.Notify.GetElasticSearchByID("2")
if !reflect.DeepEqual(savedNotifyCfg2, elasticSearchNotify{}) {
t.Errorf("Expecting Elasticsearch config %#v found %#v", elasticSearchNotify{}, savedNotifyCfg2)
}
// Set new redis notification id.
serverConfig.Notify.SetRedisByID("2", redisNotify{})
savedNotifyCfg3 := serverConfig.Notify.GetRedisByID("2")
if !reflect.DeepEqual(savedNotifyCfg3, redisNotify{}) {
t.Errorf("Expecting Redis config %#v found %#v", redisNotify{}, savedNotifyCfg3)
}
// Set new kafka notification id.
serverConfig.Notify.SetKafkaByID("2", kafkaNotify{})
savedNotifyCfg4 := serverConfig.Notify.GetKafkaByID("2")
if !reflect.DeepEqual(savedNotifyCfg4, kafkaNotify{}) {
t.Errorf("Expecting Kafka config %#v found %#v", kafkaNotify{}, savedNotifyCfg4)
}
// Set new Webhook notification id.
serverConfig.Notify.SetWebhookByID("2", webhookNotify{})
savedNotifyCfg5 := serverConfig.Notify.GetWebhookByID("2")
if !reflect.DeepEqual(savedNotifyCfg5, webhookNotify{}) {
t.Errorf("Expecting Webhook config %#v found %#v", webhookNotify{}, savedNotifyCfg5)
}
// Set new MySQL notification id.
serverConfig.Notify.SetMySQLByID("2", mySQLNotify{})
savedNotifyCfg6 := serverConfig.Notify.GetMySQLByID("2")
if !reflect.DeepEqual(savedNotifyCfg6, mySQLNotify{}) {
t.Errorf("Expecting Webhook config %#v found %#v", mySQLNotify{}, savedNotifyCfg6)
}
consoleLogger := NewConsoleLogger()
serverConfig.Logger.SetConsole(consoleLogger)
consoleCfg := serverConfig.Logger.GetConsole()
if !reflect.DeepEqual(consoleCfg, consoleLogger) {
t.Errorf("Expecting console logger config %#v found %#v", consoleLogger, consoleCfg)
}
// Set new console logger.
consoleLogger.Enable = false
serverConfig.Logger.SetConsole(consoleLogger)
// Set new file logger.
fileLogger := NewFileLogger("test-log-file")
serverConfig.Logger.SetFile(fileLogger)
fileCfg := serverConfig.Logger.GetFile()
if !reflect.DeepEqual(fileCfg, fileLogger) {
t.Errorf("Expecting file logger config %#v found %#v", fileLogger, fileCfg)
}
// Set new file logger.
fileLogger.Enable = false
serverConfig.Logger.SetFile(fileLogger)
// Match version.
if serverConfig.GetVersion() != v18 {
t.Errorf("Expecting version %s found %s", serverConfig.GetVersion(), v18)
}
// Attempt to save.
if err := serverConfig.Save(); err != nil {
t.Fatalf("Unable to save updated config file %s", err)
}
// Do this only once here.
setConfigDir(rootPath)
// Initialize server config.
if err := loadConfig(); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
}
func TestServerConfigWithEnvs(t *testing.T) {
os.Setenv("MINIO_BROWSER", "off")
defer os.Unsetenv("MINIO_BROWSER")
os.Setenv("MINIO_ACCESS_KEY", "minio")
defer os.Unsetenv("MINIO_ACCESS_KEY")
os.Setenv("MINIO_SECRET_KEY", "minio123")
defer os.Unsetenv("MINIO_SECRET_KEY")
os.Setenv("MINIO_REGION", "us-west-1")
defer os.Unsetenv("MINIO_REGION")
defer resetGlobalIsEnvs()
// Get test root.
rootPath, err := getTestRoot()
if err != nil {
t.Error(err)
}
serverHandleEnvVars()
// Do this only once here.
setConfigDir(rootPath)
// Init config
initConfig()
// remove the root directory after the test ends.
defer removeAll(rootPath)
// Check if serverConfig has the browser disabled.
if serverConfig.GetBrowser() {
t.Errorf("Expecting browser is set to false found %v", serverConfig.GetBrowser())
}
// Check if serverConfig has the region set.
if serverConfig.GetRegion() != "us-west-1" {
t.Errorf("Expecting region to be \"us-west-1\" found %v", serverConfig.GetRegion())
}
// Check if serverConfig has the credentials set.
cred := serverConfig.GetCredential()
if cred.AccessKey != "minio" {<|fim▁hole|> t.Errorf("Expecting access key to be `minio123` found %s", cred.SecretKey)
}
}
func TestCheckDupJSONKeys(t *testing.T) {
testCases := []struct {
json string
shouldPass bool
}{
{`{}`, true},
{`{"version" : "13"}`, true},
{`{"version" : "13", "version": "14"}`, false},
{`{"version" : "13", "credential": {"accessKey": "12345"}}`, true},
{`{"version" : "13", "credential": {"accessKey": "12345", "accessKey":"12345"}}`, false},
{`{"version" : "13", "notify": {"amqp": {"1"}, "webhook":{"3"}}}`, true},
{`{"version" : "13", "notify": {"amqp": {"1"}, "amqp":{"3"}}}`, false},
{`{"version" : "13", "notify": {"amqp": {"1":{}, "2":{}}}}`, true},
{`{"version" : "13", "notify": {"amqp": {"1":{}, "1":{}}}}`, false},
}
for i, testCase := range testCases {
err := doCheckDupJSONKeys(gjson.Result{}, gjson.Parse(testCase.json))
if testCase.shouldPass && err != nil {
t.Errorf("Test %d, should pass but it failed with err = %v", i+1, err)
}
if !testCase.shouldPass && err == nil {
t.Errorf("Test %d, should fail but it succeed.", i+1)
}
}
}
// Tests config validator..
func TestValidateConfig(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatalf("Init Test config failed")
}
// remove the root directory after the test ends.
defer removeAll(rootPath)
configPath := filepath.Join(rootPath, minioConfigFile)
v := v18
testCases := []struct {
configData string
shouldPass bool
}{
// Test 1 - wrong json
{`{`, false},
// Test 2 - empty json
{`{}`, false},
// Test 3 - wrong config version
{`{"version": "10"}`, false},
// Test 4 - wrong browser parameter
{`{"version": "` + v + `", "browser": "foo"}`, false},
// Test 5 - missing credential
{`{"version": "` + v + `", "browser": "on"}`, false},
// Test 6 - missing secret key
{`{"version": "` + v + `", "browser": "on", "credential" : {"accessKey":"minio", "secretKey":""}}`, false},
// Test 7 - missing region should pass, defaults to 'us-east-1'.
{`{"version": "` + v + `", "browser": "on", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, true},
// Test 8 - missing browser should pass, defaults to 'on'.
{`{"version": "` + v + `", "region": "us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, true},
// Test 9 - success
{`{"version": "` + v + `", "browser": "on", "region":"us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, true},
// Test 10 - duplicated json keys
{`{"version": "` + v + `", "browser": "on", "browser": "on", "region":"us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, false},
// Test 11 - empty filename field in File
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "logger": { "file": { "enable": true, "filename": "" } }}`, false},
// Test 12 - Test AMQP
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, false},
// Test 13 - Test NATS
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "clientID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},
// Test 14 - Test ElasticSearch
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, false},
// Test 15 - Test Redis
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, false},
// Test 16 - Test PostgreSQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
// Test 17 - Test Kafka
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "" } }}}`, false},
// Test 18 - Test Webhook
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "" } }}}`, false},
// Test 20 - Test MySQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
// Test 21 - Test Format for MySQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "invalid", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false},
// Test 22 - Test valid Format for MySQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "namespace", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
// Test 23 - Test Format for PostgreSQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "invalid", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false},
// Test 24 - Test valid Format for PostgreSQL
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "namespace", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true},
// Test 25 - Test Format for ElasticSearch
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "invalid", "url": "example.com", "index": "myindex" } }}}`, false},
// Test 26 - Test valid Format for ElasticSearch
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "namespace", "url": "example.com", "index": "myindex" } }}}`, true},
// Test 27 - Test Format for Redis
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, false},
// Test 28 - Test valid Format for Redis
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},
}
for i, testCase := range testCases {
if werr := ioutil.WriteFile(configPath, []byte(testCase.configData), 0700); werr != nil {
t.Fatal(werr)
}
_, verr := getValidConfig()
if testCase.shouldPass && verr != nil {
t.Errorf("Test %d, should pass but it failed with err = %v", i+1, verr)
}
if !testCase.shouldPass && verr == nil {
t.Errorf("Test %d, should fail but it succeed.", i+1)
}
}
}<|fim▁end|> | t.Errorf("Expecting access key to be `minio` found %s", cred.AccessKey)
}
if cred.SecretKey != "minio123" { |
<|file_name|>app.py<|end_file_name|><|fim▁begin|>from lib import incrementer<|fim▁hole|><|fim▁end|> | print(incrementer(2)) |
<|file_name|>clipped_relu.py<|end_file_name|><|fim▁begin|>from chainer import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
import numpy
class ClippedReLU(function.Function):
"""Clipped Rectifier Unit function.
Clipped ReLU is written as :math:`ClippedReLU(x, z) = \min(\max(0, x), z)`,
where :math:`z(>0)` is a parameter to cap return value of ReLU.
"""
def __init__(self, z):
if not isinstance(z, float):
raise TypeError('z must be float value')<|fim▁hole|>
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(x_type.dtype == numpy.float32)
def forward_cpu(self, x):
return utils.force_array(numpy.minimum(
numpy.maximum(0, x[0]), self.cap)).astype(numpy.float32),
def backward_cpu(self, x, gy):
return utils.force_array(
gy[0] * (0 < x[0]) * (x[0] < self.cap)).astype(numpy.float32),
def forward_gpu(self, x):
return cuda.elementwise(
'T x, T cap', 'T y', 'y = min(max(x, (T)0), cap)',
'clipped_relu_fwd')(x[0], self.cap),
def backward_gpu(self, x, gy):
gx = cuda.elementwise(
'T x, T gy, T z', 'T gx',
'gx = ((x > 0) and (x < z))? gy : 0',
'clipped_relu_bwd')(x[0], gy[0], self.cap)
return gx,
def clipped_relu(x, z=20.0):
"""Clipped Rectifier Unit function.
This function is expressed as :math:`ClippedReLU(x, z)
= \min(\max(0, x), z)`, where :math:`z(>0)` is a clipping value.
Args:
x (~chainer.Variable): Input variable.
z (float): Clipping value. (default = 20.0)
Returns:
~chainer.Variable: Output variable.
"""
return ClippedReLU(z)(x)<|fim▁end|> | # z must be positive.
assert z > 0
self.cap = z |
<|file_name|>checker.py<|end_file_name|><|fim▁begin|># Azure will execute first file.
# sys.path.append is required by Azure Web Jobs. It requires that all packages are provided to it in zip file.
# env\Lib\site-packages is virtual env path in Windows
import sys
sys.path.append("env\Lib\site-packages")
import logging
import logging.config
from datetime import datetime
import config
from database import DataStore
from ifttt import IFTTT
logging.config.fileConfig('log.config')
logger = logging.getLogger(config.logger_name)
def myExceptionHook(exctype, value, traceback):
logger.error(value)
sys.__excepthook__(exctype, value, traceback)
if __name__ == '__main__':
sys.excepthook = myExceptionHook
print("Running IFTTT checker at %s" % datetime.utcnow())
store = DataStore(config.db_server, config.db_name, config.db_user, config.db_password)
rows = store.getSensorBatteryStatuses()
current_hour = datetime.utcnow().hour
<|fim▁hole|> battery = row[1]
cable = row[2]
if battery <= 15 and cable == 0 and current_hour > 19:
logger.debug("Request charging %s (%s : %s)" % (sensor_id, battery, cable))
IFTTT.sendEvent(config.ifttt_api_key, sensor_id + config.ifttt_event_on)
# Stop charging when nearing 100
if cable == 1 and battery > 96:
logger.debug("Request unplug %s (%s : %s)" % (sensor_id, battery, cable))
IFTTT.sendEvent(config.ifttt_api_key, sensor_id + config.ifttt_event_off)<|fim▁end|> | for row in rows:
sensor_id = row[0] |
<|file_name|>battlefieldutils.cpp<|end_file_name|><|fim▁begin|>/*
===========================================================================
Copyright (c) 2010-2014 Darkstar Dev Teams
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see http://www.gnu.org/licenses/
This file is part of DarkStar-server source code.
===========================================================================
*/
#include <string.h>
#include "../entities/charentity.h"
#include "../entities/mobentity.h"
#include "../party.h"
#include "charutils.h"
#include "../alliance.h"
#include "zoneutils.h"
#include "itemutils.h"
#include "battlefieldutils.h"
#include "../battlefield.h"
#include "../battlefield_handler.h"
#include "../packets/entity_update.h"
namespace battlefieldutils{
/***************************************************************
Loads the given battlefield from the database and returns
a new Battlefield object.
****************************************************************/
CBattlefield* loadBattlefield(CBattlefieldHandler* hand, uint16 bcnmid, BATTLEFIELDTYPE type){
const int8* fmtQuery = "SELECT name, bcnmId, fastestName, fastestTime, timeLimit, levelCap, lootDropId, rules, partySize, zoneId \
FROM bcnm_info \
WHERE bcnmId = %u";
int32 ret = Sql_Query(SqlHandle, fmtQuery, bcnmid);
if (ret == SQL_ERROR ||
Sql_NumRows(SqlHandle) == 0 ||
Sql_NextRow(SqlHandle) != SQL_SUCCESS)
{
ShowError("Cannot load battlefield BCNM:%i \n",bcnmid);
}
else
{
CBattlefield* PBattlefield = new CBattlefield(hand,Sql_GetUIntData(SqlHandle,1), type);
int8* tmpName;
Sql_GetData(SqlHandle,0,&tmpName,NULL);
PBattlefield->setBcnmName(tmpName);
PBattlefield->setTimeLimit(Sql_GetUIntData(SqlHandle,4));
PBattlefield->setLevelCap(Sql_GetUIntData(SqlHandle,5));
PBattlefield->setLootId(Sql_GetUIntData(SqlHandle,6));
PBattlefield->setMaxParticipants(Sql_GetUIntData(SqlHandle,8));
PBattlefield->setZoneId(Sql_GetUIntData(SqlHandle,9));
PBattlefield->m_RuleMask = (uint16)Sql_GetUIntData(SqlHandle,7);
return PBattlefield;
}
return NULL;
}
/***************************************************************
Spawns monsters for the given BCNMID/Battlefield number by
looking at bcnm_battlefield table for mob ids then spawning
them and adding them to the monster list for the given
battlefield.
****************************************************************/
bool spawnMonstersForBcnm(CBattlefield* battlefield){
DSP_DEBUG_BREAK_IF(battlefield==NULL);
//get ids from DB
const int8* fmtQuery = "SELECT monsterId, conditions \
FROM bcnm_battlefield \
WHERE bcnmId = %u AND battlefieldNumber = %u";
int32 ret = Sql_Query(SqlHandle, fmtQuery, battlefield->getID(), battlefield->getBattlefieldNumber());
if (ret == SQL_ERROR ||
Sql_NumRows(SqlHandle) == 0)
{
ShowError("spawnMonstersForBcnm : SQL error - Cannot find any monster IDs for BCNMID %i Battlefield %i \n",
battlefield->getID(), battlefield->getBattlefieldNumber());
}
else{
while(Sql_NextRow(SqlHandle) == SQL_SUCCESS){
uint32 mobid = Sql_GetUIntData(SqlHandle,0);
uint8 condition = Sql_GetUIntData(SqlHandle,1);
CMobEntity* PMob = (CMobEntity*)zoneutils::GetEntity(mobid, TYPE_MOB);
if (PMob != NULL)
{
PMob->m_battlefieldID = battlefield->getBattlefieldNumber();
PMob->m_bcnmID = battlefield->getID();
if (condition & CONDITION_SPAWNED_AT_START)
{
// This condition is needed for some mob at dynamis, else he don't pop
if(PMob->PBattleAI->GetCurrentAction() == ACTION_FADE_OUT){
PMob->PBattleAI->SetLastActionTime(0);
PMob->PBattleAI->SetCurrentAction(ACTION_NONE);
}
if (PMob->PBattleAI->GetCurrentAction() == ACTION_NONE ||
PMob->PBattleAI->GetCurrentAction() == ACTION_SPAWN)
{
PMob->PBattleAI->SetLastActionTime(0);
PMob->PBattleAI->SetCurrentAction(ACTION_SPAWN);
if(strcmp(PMob->GetName(),"Maat")==0){
mobutils::SetupMaat(PMob, (JOBTYPE)battlefield->getPlayerMainJob());
PMob->m_DropID = 4485; //Give Maat his stealable Warp Scroll
// disable players subjob
battlefield->disableSubJob();
// disallow subjob, this will enable for later
battlefield->m_RuleMask &= ~(1 << RULES_ALLOW_SUBJOBS);
}
//ShowDebug("Spawned %s (%u) id %i inst %i \n",PMob->GetName(),PMob->id,battlefield->getID(),battlefield->getBattlefieldNumber());
battlefield->addEnemy(PMob, condition);
} else {
ShowDebug(CL_CYAN"SpawnMobForBcnm: <%s> (%u) is alredy spawned\n" CL_RESET, PMob->GetName(), PMob->id);
}
} else {
battlefield->addEnemy(PMob, condition);
}
} else {
ShowDebug("SpawnMobForBcnm: mob %u not found\n", mobid);
}
}
return true;
}
return false;
}
/***************************************************************
Spawns treasure chest/armory crate, what ever on winning bcnm
****************************************************************/
bool spawnTreasureForBcnm(CBattlefield* battlefield){
DSP_DEBUG_BREAK_IF(battlefield==NULL);
//get ids from DB
const int8* fmtQuery = "SELECT npcId \
FROM bcnm_treasure_chests \
WHERE bcnmId = %u AND battlefieldNumber = %u";
int32 ret = Sql_Query(SqlHandle, fmtQuery, battlefield->getID(), battlefield->getBattlefieldNumber());
if (ret == SQL_ERROR || Sql_NumRows(SqlHandle) == 0)
{
ShowError("spawnTreasureForBcnm : SQL error - Cannot find any npc IDs for BCNMID %i Battlefield %i \n",
battlefield->getID(), battlefield->getBattlefieldNumber());
}
else
{
while(Sql_NextRow(SqlHandle) == SQL_SUCCESS)
{
uint32 npcid = Sql_GetUIntData(SqlHandle,0);
CBaseEntity* PNpc = (CBaseEntity*)zoneutils::GetEntity(npcid, TYPE_NPC);
if (PNpc != NULL)
{
PNpc->status = STATUS_NORMAL;
PNpc->animation = 0;
PNpc->loc.zone->PushPacket(PNpc, CHAR_INRANGE, new CEntityUpdatePacket(PNpc, ENTITY_SPAWN, UPDATE_ALL));
battlefield->addNpc(PNpc);
ShowDebug(CL_CYAN"Spawned %s id %i inst %i \n",PNpc->status,PNpc->id,battlefield->getBattlefieldNumber());
}else
{
ShowDebug(CL_CYAN"spawnTreasureForBcnm: <%s> is already spawned\n" CL_RESET, PNpc->GetName());
}
}
return true;
}<|fim▁hole|>
/**************************************************************
Called by ALL BCNMs to check winning conditions every tick. This
is usually when all the monsters are defeated but can be other things
(e.g. mob below X% HP, successful Steal, etc)
***************************************************************/
bool meetsWinningConditions(CBattlefield* battlefield, uint32 tick){
if (battlefield->won()) return true;
//handle odd cases e.g. stop fight @ x% HP
//handle Maat fights
if(battlefield->locked && (battlefield->m_RuleMask & RULES_MAAT))
{
// survive for 5 mins
if(battlefield->getPlayerMainJob() == JOB_WHM && (tick - battlefield->fightTick) > 5 * 60 * 1000)
return true;
if(battlefield->isEnemyBelowHPP(10))
return true;
if(battlefield->getPlayerMainJob() == JOB_THF && battlefield->m_EnemyList.at(0)->m_ItemStolen) //thf can win by stealing from maat only if maat not previously defeated
{
const int8* fmtQuery = "SELECT value FROM char_vars WHERE charid = %u AND varname = '%s' LIMIT 1;";
int32 ret = Sql_Query(SqlHandle,fmtQuery,battlefield->m_PlayerList.at(0)->id, "maatDefeated");
if(ret != SQL_ERROR && Sql_NumRows(SqlHandle) == 0)
return true;
else if(ret != SQL_ERROR && Sql_NumRows(SqlHandle) != 0 && Sql_NextRow(SqlHandle) == SQL_SUCCESS)
{
int16 value = (int16)Sql_GetIntData(SqlHandle,0);
if(value <= 0)
return true;
}
}
}
// savage
if(battlefield->getID() == 961 && battlefield->isEnemyBelowHPP(30)){
return true;
}
//generic cases, kill all mobs
if(battlefield->allEnemiesDefeated()){
return true;
}
return false;
}
/**************************************************************
Called by ALL BCNMs to check losing conditions every tick. This
will be when everyone is dead and the death timer is >3min (usually)
or when everyone has left, etc.
****************************************************************/
bool meetsLosingConditions(CBattlefield* battlefield, uint32 tick){
if (battlefield->lost()) return true;
//check for expired duration e.g. >30min. Need the tick>start check as the start can be assigned
//after the tick initially due to threading
if(tick>battlefield->getStartTime() && (tick - battlefield->getStartTime()) > battlefield->getTimeLimit()*1000){
ShowDebug("BCNM %i inst:%i - You have exceeded your time limit!\n",battlefield->getID(),
battlefield->getBattlefieldNumber(),tick,battlefield->getStartTime(),battlefield->getTimeLimit());
return true;
}
battlefield->lastTick = tick;
//check for all dead for 3min (or whatever the rule mask says)
if(battlefield->getDeadTime()!=0){
if(battlefield->m_RuleMask & RULES_REMOVE_3MIN){
// if(((tick - battlefield->getDeadTime())/1000) % 20 == 0){
// battlefield->pushMessageToAllInBcnm(200,180 - (tick - battlefield->getDeadTime())/1000);
// }
if(tick - battlefield->getDeadTime() > 180000){
ShowDebug("All players from the battlefield %i inst:%i have fallen for 3mins. Removing.\n",
battlefield->getID(),battlefield->getBattlefieldNumber());
return true;
}
}
else{
ShowDebug("All players have fallen. Failed battlefield %i inst %i. No 3min mask. \n",battlefield->getID(),battlefield->getBattlefieldNumber());
return true;
}
}
return false;
}
/*************************************************************
Returns the losing exit position for this BCNM.
****************************************************************/
void getLosePosition(CBattlefield* battlefield, int (&pPosition)[4]){
if(battlefield==NULL)
return;
switch(battlefield->getZoneId()){
case 139: //Horlais Peak
pPosition[0]=-503; pPosition[1]=158; pPosition[2]=-212; pPosition[3]=131;
break;
}
}
void getStartPosition(uint8 zoneid, int (&pPosition)[4]){
switch(zoneid){
case 139: //Horlais Peak
pPosition[0]=-503; pPosition[1]=158; pPosition[2]=-212; pPosition[3]=131;
break;
case 144: //Waug. Shrine
pPosition[0]=-361; pPosition[1]=100; pPosition[2]=-260; pPosition[3]=131;
break;
case 146: //Balgas Dias
pPosition[0]=317; pPosition[1]=-126; pPosition[2]=380; pPosition[3]=131;
break;
case 165: //Throne Room
pPosition[0]=114; pPosition[1]=-8; pPosition[2]=0; pPosition[3]=131;
break;
case 206: //QuBia Arena
pPosition[0]=-241; pPosition[1]=-26; pPosition[2]=20; pPosition[3]=131;
break;
}
}
/*************************************************************
Returns the winning exit position for this BCNM.
****************************************************************/
void getWinPosition(CBattlefield* battlefield, int (&pPosition)[4]){
if(battlefield==NULL)
return;
switch(battlefield->getZoneId()){
case 139: //Horlais Peak
pPosition[0]=445; pPosition[1]=-38; pPosition[2]=-19; pPosition[3]=200;
break;
}
}
uint8 getMaxLootGroups(CBattlefield* battlefield){
const int8* fmtQuery = "SELECT MAX(lootGroupId) \
FROM bcnm_loot \
JOIN bcnm_info ON bcnm_info.LootDropId = bcnm_loot.LootDropId \
WHERE bcnm_info.LootDropId = %u LIMIT 1";
int32 ret = Sql_Query(SqlHandle, fmtQuery, battlefield->getLootId());
if (ret == SQL_ERROR || Sql_NumRows(SqlHandle) == 0 || Sql_NextRow(SqlHandle) != SQL_SUCCESS){
ShowError("SQL error occured \n");
return 0;
}
else {
return (uint8)Sql_GetUIntData(SqlHandle,0);
}
}
uint16 getRollsPerGroup(CBattlefield* battlefield, uint8 groupID){
const int8* fmtQuery = "SELECT SUM(CASE \
WHEN LootDropID = %u \
AND lootGroupId = %u \
THEN rolls \
ELSE 0 END) \
FROM bcnm_loot;";
int32 ret = Sql_Query(SqlHandle, fmtQuery, battlefield->getLootId(), groupID);
if (ret == SQL_ERROR || Sql_NumRows(SqlHandle) == 0 || Sql_NextRow(SqlHandle) != SQL_SUCCESS){
ShowError("SQL error occured \n");
return 0;
}
else {
return (uint16)Sql_GetUIntData(SqlHandle,0);
}
}
/*************************************************************
Get loot from the armoury crate
****************************************************************/
void getChestItems(CBattlefield* battlefield){
int instzone = battlefield->getZoneId();
uint8 maxloot = 0;
LootList_t* LootList = itemutils::GetLootList(battlefield->getLootId());
if (LootList == NULL){
ShowError("BCNM Chest opened with no valid loot list!");
//no loot available for bcnm. End bcnm.
battlefield->winBcnm();
return;
}
else
{
for (uint8 sizeoflist=0; sizeoflist < LootList->size() ; ++sizeoflist){
if(LootList->at(sizeoflist).LootGroupId > maxloot){
maxloot= LootList->at(sizeoflist).LootGroupId;
}
}
}
//getMaxLootGroups(battlefield);
if(maxloot!=0){
for (uint8 group = 0; group <= maxloot; ++group){
uint16 maxRolls = getRollsPerGroup(battlefield,group);
uint16 groupRoll = (uint16)(WELL512::irand()%maxRolls);
uint16 itemRolls = 0;
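//Each item's Rolls value acts as a weight: accumulate weights in itemRolls
//and take the first item whose cumulative weight reaches groupRoll.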
for (uint8 item = 0; item < LootList->size(); ++item)
{
if (group == LootList->at(item).LootGroupId)
{
itemRolls += LootList->at(item).Rolls;
if (groupRoll <= itemRolls)
{
battlefield->m_PlayerList.at(0)->PTreasurePool->AddItem(LootList->at(item).ItemID, battlefield->m_NpcList.at(0));
break;
}
}
}
}
}
//user opened chest, complete bcnm
if(instzone!=37 && instzone!=38 ){
battlefield->winBcnm();
}
else{
battlefield->m_NpcList.clear();
}
}
bool spawnSecondPartDynamis(CBattlefield* battlefield){
DSP_DEBUG_BREAK_IF(battlefield==NULL);
//get ids from DB
const int8* fmtQuery = "SELECT monsterId \
FROM bcnm_battlefield \
WHERE bcnmId = %u AND battlefieldNumber = 2";
int32 ret = Sql_Query(SqlHandle, fmtQuery, battlefield->getID());
if (ret == SQL_ERROR ||
Sql_NumRows(SqlHandle) == 0)
{
ShowError("spawnSecondPartDynamis : SQL error - Cannot find any monster IDs for Dynamis %i \n",
battlefield->getID(), battlefield->getBattlefieldNumber());
}
else{
while(Sql_NextRow(SqlHandle) == SQL_SUCCESS){
uint32 mobid = Sql_GetUIntData(SqlHandle,0);
CMobEntity* PMob = (CMobEntity*)zoneutils::GetEntity(mobid, TYPE_MOB);
if (PMob != NULL)
{
if (PMob->PBattleAI->GetCurrentAction() == ACTION_NONE ||
PMob->PBattleAI->GetCurrentAction() == ACTION_SPAWN)
{
PMob->PBattleAI->SetLastActionTime(0);
PMob->PBattleAI->SetCurrentAction(ACTION_SPAWN);
PMob->m_battlefieldID = battlefield->getBattlefieldNumber();
ShowDebug("Spawned %s (%u) id %i inst %i \n",PMob->GetName(),PMob->id,battlefield->getID(),battlefield->getBattlefieldNumber());
battlefield->addEnemy(PMob, CONDITION_SPAWNED_AT_START & CONDITION_WIN_REQUIREMENT);
} else {
ShowDebug(CL_CYAN"spawnSecondPartDynamis: <%s> (%u) is alredy spawned\n" CL_RESET, PMob->GetName(), PMob->id);
}
} else {
ShowDebug("spawnSecondPartDynamis: mob %u not found\n", mobid);
}
}
return true;
}
return false;
}
};<|fim▁end|> | return false;
}
|
<|file_name|>fancy_getopt.py<|end_file_name|><|fim▁begin|>"""distutils.fancy_getopt
Wrapper around the standard getopt module that provides the following
additional features:
* short and long options are tied together
* options have help strings, so fancy_getopt could potentially
create a complete usage summary
* options set attributes of a passed-in object
"""
__revision__ = "$Id: fancy_getopt.py 58495 2007-10-16 18:12:55Z guido.van.rossum $"
import sys, string, re<|fim▁hole|>
# Much like command_re in distutils.core, this is close to but not quite
# the same as a Python NAME -- except, in the spirit of most GNU
# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
# The similarities to NAME are again not a coincidence...
longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
longopt_re = re.compile(r'^%s$' % longopt_pat)
# For recognizing "negative alias" options, eg. "quiet=!verbose"
neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
# This is used to translate long options to legitimate Python identifiers
# (for use as attributes of some object).
longopt_xlate = lambda s: s.replace('-', '_')
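# For example, longopt_xlate('dry-run') gives 'dry_run'.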
class FancyGetopt:
"""Wrapper around the standard 'getopt()' module that provides some
handy extra functionality:
* short and long options are tied together
* options have help strings, and help text can be assembled
from them
* options set attributes of a passed-in object
* boolean options can have "negative aliases" -- eg. if
--quiet is the "negative alias" of --verbose, then "--quiet"
on the command line sets 'verbose' to false
"""
def __init__(self, option_table=None):
# The option table is (currently) a list of tuples. The
# tuples may have three or four values:
# (long_option, short_option, help_string [, repeatable])
# if an option takes an argument, its long_option should have '='
# appended; short_option should just be a single character, no ':'
# in any case. If a long_option doesn't have a corresponding
# short_option, short_option should be None. All option tuples
# must have long options.
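# An illustrative entry (not defined in this module) could be:
# ('verbose', 'v', "run verbosely (default)", 1)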
self.option_table = option_table
# 'option_index' maps long option names to entries in the option
# table (ie. those 3-tuples).
self.option_index = {}
if self.option_table:
self._build_index()
# 'alias' records (duh) alias options; {'foo': 'bar'} means
# --foo is an alias for --bar
self.alias = {}
# 'negative_alias' keeps track of options that are the boolean
# opposite of some other option
self.negative_alias = {}
# These keep track of the information in the option table. We
# don't actually populate these structures until we're ready to
# parse the command-line, since the 'option_table' passed in here
# isn't necessarily the final word.
self.short_opts = []
self.long_opts = []
self.short2long = {}
self.attr_name = {}
self.takes_arg = {}
# And 'option_order' is filled up in 'getopt()'; it records the
# original order of options (and their values) on the command-line,
# but expands short options, converts aliases, etc.
self.option_order = []
def _build_index(self):
self.option_index.clear()
for option in self.option_table:
self.option_index[option[0]] = option
def set_option_table(self, option_table):
self.option_table = option_table
self._build_index()
def add_option(self, long_option, short_option=None, help_string=None):
if long_option in self.option_index:
raise DistutilsGetoptError(
"option conflict: already an option '%s'" % long_option)
else:
option = (long_option, short_option, help_string)
self.option_table.append(option)
self.option_index[long_option] = option
def has_option(self, long_option):
"""Return true if the option table for this parser has an
option with long name 'long_option'."""
return long_option in self.option_index
def get_attr_name(self, long_option):
"""Translate long option name 'long_option' to the form it
has as an attribute of some object: ie., translate hyphens
to underscores."""
return longopt_xlate(long_option)
def _check_alias_dict(self, aliases, what):
assert isinstance(aliases, dict)
for (alias, opt) in aliases.items():
if alias not in self.option_index:
raise DistutilsGetoptError(("invalid %s '%s': "
"option '%s' not defined") % (what, alias, alias))
if opt not in self.option_index:
raise DistutilsGetoptError(("invalid %s '%s': "
"aliased option '%s' not defined") % (what, alias, opt))
def set_aliases(self, alias):
"""Set the aliases for this option parser."""
self._check_alias_dict(alias, "alias")
self.alias = alias
def set_negative_aliases(self, negative_alias):
"""Set the negative aliases for this option parser.
'negative_alias' should be a dictionary mapping option names to
option names, both the key and value must already be defined
in the option table."""
self._check_alias_dict(negative_alias, "negative alias")
self.negative_alias = negative_alias
def _grok_option_table(self):
"""Populate the various data structures that keep tabs on the
option table. Called by 'getopt()' before it can do anything
worthwhile.
"""
self.long_opts = []
self.short_opts = []
self.short2long.clear()
self.repeat = {}
for option in self.option_table:
if len(option) == 3:
long, short, help = option
repeat = 0
elif len(option) == 4:
long, short, help, repeat = option
else:
# the option table is part of the code, so simply
# assert that it is correct
raise ValueError("invalid option tuple: %r" % (option,))
# Type- and value-check the option names
if not isinstance(long, str) or len(long) < 2:
raise DistutilsGetoptError(("invalid long option '%s': "
"must be a string of length >= 2") % long)
if (not ((short is None) or
(isinstance(short, str) and len(short) == 1))):
raise DistutilsGetoptError("invalid short option '%s': "
"must a single character or None" % short)
self.repeat[long] = repeat
self.long_opts.append(long)
if long[-1] == '=': # option takes an argument?
if short: short = short + ':'
long = long[0:-1]
self.takes_arg[long] = 1
else:
# Is option is a "negative alias" for some other option (eg.
# "quiet" == "!verbose")?
alias_to = self.negative_alias.get(long)
if alias_to is not None:
if self.takes_arg[alias_to]:
raise DistutilsGetoptError(
"invalid negative alias '%s': "
"aliased option '%s' takes a value"
% (long, alias_to))
self.long_opts[-1] = long # XXX redundant?!
self.takes_arg[long] = 0
# If this is an alias option, make sure its "takes arg" flag is
# the same as the option it's aliased to.
alias_to = self.alias.get(long)
if alias_to is not None:
if self.takes_arg[long] != self.takes_arg[alias_to]:
raise DistutilsGetoptError(
"invalid alias '%s': inconsistent with "
"aliased option '%s' (one of them takes a value, "
"the other doesn't)"
% (long, alias_to))
# Now enforce some bondage on the long option name, so we can
# later translate it to an attribute name on some object. Have
# to do this a bit late to make sure we've removed any trailing
# '='.
if not longopt_re.match(long):
raise DistutilsGetoptError(
"invalid long option name '%s' "
"(must be letters, numbers, hyphens only)" % long)
self.attr_name[long] = self.get_attr_name(long)
if short:
self.short_opts.append(short)
self.short2long[short[0]] = long
def getopt(self, args=None, object=None):
"""Parse command-line options in args. Store as attributes on object.
If 'args' is None or not supplied, uses 'sys.argv[1:]'. If
'object' is None or not supplied, creates a new OptionDummy
object, stores option values there, and returns a tuple (args,
object). If 'object' is supplied, it is modified in place and
'getopt()' just returns 'args'; in both cases, the returned
'args' is a modified copy of the passed-in 'args' list, which
is left untouched.
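An illustrative call (hypothetical option table, not taken from a real
distutils command):
    parser = FancyGetopt([('verbose', 'v', "run verbosely"),
                          ('output=', 'o', "write results to FILE")])
    args, opts = parser.getopt(['-v', '--output=out.txt', 'src'])
leaves the positional ['src'] in 'args' and exposes the option values
as attributes on 'opts' (opts.verbose, opts.output).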
"""
if args is None:
args = sys.argv[1:]
if object is None:
object = OptionDummy()
created_object = True
else:
created_object = False
self._grok_option_table()
short_opts = ' '.join(self.short_opts)
try:
opts, args = getopt.getopt(args, short_opts, self.long_opts)
except getopt.error as msg:
raise DistutilsArgError(msg)
for opt, val in opts:
if len(opt) == 2 and opt[0] == '-': # it's a short option
opt = self.short2long[opt[1]]
else:
assert len(opt) > 2 and opt[:2] == '--'
opt = opt[2:]
alias = self.alias.get(opt)
if alias:
opt = alias
if not self.takes_arg[opt]: # boolean option?
assert val == '', "boolean option can't have value"
alias = self.negative_alias.get(opt)
if alias:
opt = alias
val = 0
else:
val = 1
attr = self.attr_name[opt]
# The only repeating option at the moment is 'verbose'.
# It has a negative option -q quiet, which should set verbose = 0.
if val and self.repeat.get(attr) is not None:
val = getattr(object, attr, 0) + 1
setattr(object, attr, val)
self.option_order.append((opt, val))
# for opts
if created_object:
return args, object
else:
return args
def get_option_order(self):
"""Returns the list of (option, value) tuples processed by the
previous run of 'getopt()'. Raises RuntimeError if
'getopt()' hasn't been called yet.
"""
if self.option_order is None:
raise RuntimeError("'getopt()' hasn't been called yet")
else:
return self.option_order
def generate_help(self, header=None):
"""Generate help text (a list of strings, one per suggested line of
output) from the option table for this FancyGetopt object.
"""
# Blithely assume the option table is good: probably wouldn't call
# 'generate_help()' unless you've already called 'getopt()'.
# First pass: determine maximum length of long option names
max_opt = 0
for option in self.option_table:
long = option[0]
short = option[1]
l = len(long)
if long[-1] == '=':
l = l - 1
if short is not None:
l = l + 5 # " (-x)" where short == 'x'
if l > max_opt:
max_opt = l
opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter
# Typical help block looks like this:
# --foo controls foonabulation
# Help block for longest option looks like this:
# --flimflam set the flim-flam level
# and with wrapped text:
# --flimflam set the flim-flam level (must be between
# 0 and 100, except on Tuesdays)
# Options with short names will have the short name shown (but
# it doesn't contribute to max_opt):
# --foo (-f) controls foonabulation
# If adding the short option would make the left column too wide,
# we push the explanation off to the next line
# --flimflam (-l)
# set the flim-flam level
# Important parameters:
# - 2 spaces before option block start lines
# - 2 dashes for each long option name
# - min. 2 spaces between option and explanation (gutter)
# - 5 characters (incl. space) for short option name
# Now generate lines of help text. (If 80 columns were good enough
# for Jesus, then 78 columns are good enough for me!)
line_width = 78
text_width = line_width - opt_width
big_indent = ' ' * opt_width
if header:
lines = [header]
else:
lines = ['Option summary:']
for option in self.option_table:
long, short, help = option[:3]
text = wrap_text(help, text_width)
if long[-1] == '=':
long = long[0:-1]
# Case 1: no short option at all (makes life easy)
if short is None:
if text:
lines.append(" --%-*s %s" % (max_opt, long, text[0]))
else:
lines.append(" --%-*s " % (max_opt, long))
# Case 2: we have a short option, so we have to include it
# just after the long option
else:
opt_names = "%s (-%s)" % (long, short)
if text:
lines.append(" --%-*s %s" %
(max_opt, opt_names, text[0]))
else:
lines.append(" --%-*s" % (max_opt, opt_names))
for l in text[1:]:
lines.append(big_indent + l)
return lines
def print_help(self, header=None, file=None):
if file is None:
file = sys.stdout
for line in self.generate_help(header):
file.write(line + "\n")
def fancy_getopt(options, negative_opt, object, args):
parser = FancyGetopt(options)
parser.set_negative_aliases(negative_opt)
return parser.getopt(args, object)
WS_TRANS = {ord(_wschar) : ' ' for _wschar in string.whitespace}
def wrap_text(text, width):
"""wrap_text(text : string, width : int) -> [string]
Split 'text' into multiple lines of no more than 'width' characters
each, and return the list of strings that results.
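For instance (illustrative), wrap_text("The quick brown fox", 10)
splits on spaces/hyphens and returns ["The quick", "brown fox"];
whitespace chunks left dangling at a break are dropped.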
"""
if text is None:
return []
if len(text) <= width:
return [text]
text = text.expandtabs()
text = text.translate(WS_TRANS)
chunks = re.split(r'( +|-+)', text)
chunks = [ch for ch in chunks if ch] # ' - ' results in empty strings
lines = []
while chunks:
cur_line = [] # list of chunks (to-be-joined)
cur_len = 0 # length of current line
while chunks:
l = len(chunks[0])
if cur_len + l <= width: # can squeeze (at least) this chunk in
cur_line.append(chunks[0])
del chunks[0]
cur_len = cur_len + l
else: # this line is full
# drop last chunk if all space
if cur_line and cur_line[-1][0] == ' ':
del cur_line[-1]
break
if chunks: # any chunks left to process?
# if the current line is still empty, then we had a single
# chunk that's too big to fit on a line -- so we break
# down and break it up at the line width
if cur_len == 0:
cur_line.append(chunks[0][0:width])
chunks[0] = chunks[0][width:]
# all-whitespace chunks at the end of a line can be discarded
# (and we know from the re.split above that if a chunk has
# *any* whitespace, it is *all* whitespace)
if chunks[0][0] == ' ':
del chunks[0]
# and store this line in the list-of-all-lines -- as a single
# string, of course!
lines.append(''.join(cur_line))
return lines
def translate_longopt(opt):
"""Convert a long option name to a valid Python identifier by
changing "-" to "_".
"""
return longopt_xlate(opt)
class OptionDummy:
"""Dummy class just used as a place to hold command-line option
values as instance attributes."""
def __init__(self, options=[]):
"""Create a new OptionDummy instance. The attributes listed in
'options' will be initialized to None."""
for opt in options:
setattr(self, opt, None)
if __name__ == "__main__":
text = """\
Tra-la-la, supercalifragilisticexpialidocious.
How *do* you spell that odd word, anyways?
(Someone ask Mary -- she'll know [or she'll
say, "How should I know?"].)"""
for w in (10, 20, 30, 40):
print("width: %d" % w)
print("\n".join(wrap_text(text, w)))
print()<|fim▁end|> | import getopt
from distutils.errors import * |
<|file_name|>test_tetengo2.iterator.polymorphic_forward_iterator.cpp<|end_file_name|><|fim▁begin|>/*! \file
\brief Test of class tetengo2::iterator::polymorphic_forward_iterator.
Copyright (C) 2007-2019 kaoru
$Id$
*/
#include <sstream>
#include <string>
#include <boost/operators.hpp>
#include <boost/preprocessor.hpp>
#include <boost/test/unit_test.hpp>
#include <tetengo2/iterator/polymorphic_forward_iterator.h>
namespace {
// types
using base_type = tetengo2::iterator::polymorphic_forward_iterator<std::string>;
class concrete_iterator : public base_type
{
public:
explicit concrete_iterator(const int first) : m_value(first, 'a') {}
private:
std::string m_value;
virtual reference dereference() override
{
return m_value;
}
virtual bool equal(const base_type& another) const override
{
const concrete_iterator* const p_another = dynamic_cast<const concrete_iterator*>(&another);
if (!p_another)
return false;
return m_value == p_another->m_value;
}
virtual void increment() override
{
m_value += "a";
}
};
class other_concrete_iterator : public base_type
{
public:
explicit other_concrete_iterator(const int first) : m_value(first, 'a') {}
private:
std::string m_value;
virtual reference dereference() override
{
return m_value;
}
virtual bool equal(const base_type& another) const override
{
const other_concrete_iterator* const p_another = dynamic_cast<const other_concrete_iterator*>(&another);
if (!p_another)
return false;
return m_value == p_another->m_value;
}
virtual void increment() override
{
m_value += "a";
}
};
}
BOOST_AUTO_TEST_SUITE(test_tetengo2)
BOOST_AUTO_TEST_SUITE(iterator)
BOOST_AUTO_TEST_SUITE(polymorphic_forward_iterator)
// test cases
BOOST_AUTO_TEST_CASE(construction)
{
BOOST_TEST_PASSPOINT();
const concrete_iterator iter{ 42 };
}
BOOST_AUTO_TEST_CASE(operator_dereference)
{
BOOST_TEST_PASSPOINT();
{
const concrete_iterator iter{ 42 };
BOOST_CHECK(*iter == std::string(42, 'a'));
}
{
concrete_iterator iter{ 42 };
*iter = std::string(24, 'a');
BOOST_CHECK(*iter == std::string(24, 'a'));
}
}
BOOST_AUTO_TEST_CASE(operator_arrow)
{
BOOST_TEST_PASSPOINT();
{
const concrete_iterator iter{ 42 };
BOOST_TEST(iter->length() == 42U);
}
{
const concrete_iterator iter{ 42 };
iter->append("a");
BOOST_TEST(iter->length() == 43U);
}
}
BOOST_AUTO_TEST_CASE(operator_increment)
{
BOOST_TEST_PASSPOINT();
concrete_iterator iter{ 42 };
const base_type& incremented = ++iter;
BOOST_CHECK(*iter == std::string(43, 'a'));
BOOST_CHECK(*incremented == std::string(43, 'a'));
}
BOOST_AUTO_TEST_CASE(operator_postincrement)
{
BOOST_TEST_PASSPOINT();
concrete_iterator iter{ 42 };
iter++;
BOOST_CHECK(*iter == std::string(43, 'a'));
}
BOOST_AUTO_TEST_CASE(operator_equal)
{
BOOST_TEST_PASSPOINT();
{
const concrete_iterator iter1{ 42 };
const concrete_iterator iter2{ 42 };
BOOST_CHECK(iter1 == iter2);
}
{
const concrete_iterator iter1{ 42 };
const concrete_iterator iter2{ 24 };
BOOST_CHECK(!(iter1 == iter2));
}
{
const concrete_iterator iter1{ 42 };
const other_concrete_iterator iter2{ 42 };
BOOST_CHECK(!(iter1 == iter2));
}
}
<|fim▁hole|> {
BOOST_TEST_PASSPOINT();
{
const concrete_iterator iter1{ 42 };
const concrete_iterator iter2{ 42 };
BOOST_CHECK(!(iter1 != iter2));
}
{
const concrete_iterator iter1{ 42 };
const concrete_iterator iter2{ 24 };
BOOST_CHECK(iter1 != iter2);
}
{
const concrete_iterator iter1{ 42 };
const other_concrete_iterator iter2{ 42 };
BOOST_CHECK(iter1 != iter2);
}
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()<|fim▁end|> | BOOST_AUTO_TEST_CASE(operator_not_equal)
|
<|file_name|>accordion.js<|end_file_name|><|fim▁begin|>var accordion = function() {<|fim▁hole|><|fim▁end|> | var e = new Foundation.Accordion($('.accordion'));
} |
<|file_name|>PopulationAggregationMetaDataProvider.java<|end_file_name|><|fim▁begin|>package org.obiba.mica.search.aggregations;<|fim▁hole|>import org.obiba.mica.micaConfig.service.helper.AggregationMetaDataProvider;
import org.obiba.mica.micaConfig.service.helper.PopulationIdAggregationMetaDataHelper;
import org.springframework.stereotype.Component;
import javax.inject.Inject;
import java.util.Map;
@Component
public class PopulationAggregationMetaDataProvider implements AggregationMetaDataProvider {
private static final String AGGREGATION_NAME = "populationId";
private final PopulationIdAggregationMetaDataHelper helper;
@Inject
public PopulationAggregationMetaDataProvider(PopulationIdAggregationMetaDataHelper helper) {
this.helper = helper;
}
@Override
public MetaData getMetadata(String aggregation, String termKey, String locale) {
Map<String, LocalizedMetaData> dataMap = helper.getPopulations();
return AGGREGATION_NAME.equals(aggregation) && dataMap.containsKey(termKey) ?
MetaData.newBuilder()
.title(dataMap.get(termKey).getTitle().get(locale))
.description(dataMap.get(termKey).getDescription().get(locale))
.className(dataMap.get(termKey).getClassName())
.build() : null;
}
@Override
public boolean containsAggregation(String aggregation) {
return AGGREGATION_NAME.equals(aggregation);
}
@Override
public void refresh() {
}
}<|fim▁end|> | |
<|file_name|>analyzer_wysinwyx.rs<|end_file_name|><|fim▁begin|>//! This module offers structs and traits for valueset analysis as inctroduced
//! in "Analyzing Memory Access in x86 Executables" by Gogul Balakrishnan and
//! Thomas Reps
//! For a more complete work on this topic, see the dissertation(!) (there is
//! also an article with the same title) of Gogul Balakrishnan:
//! "WYSINWYX: WHAT YOU SEE IS NOT WHAT YOU EXECUTE"
//! It offers datastructures specific to memory access
//! VSA (value-set analysis) analyzes access patterns on memory
//! an a-loc is an "abstract location" representing roughly a variable in C
//! This implementation is still work in progress.
use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;
use petgraph::graph::{NodeIndex,EdgeIndex};
use super::{StridedInterval_u,AbstractValue};
use super::mem_structs::{AbstractStore,MemRegion,A_Loc,AbstractAddress};
use super::mem_structs::{MemRegionType};
use frontend::containers::{RModule,RadecoModule,RFunction,RadecoFunction};
use frontend::bindings::{RBindings,RadecoBindings,Binding};
use frontend::source::Source;
use middle::ssa::ssa_traits::NodeData as TNodeData;
use middle::ssa::ssa_traits::{SSA,NodeType,ValueType};
use middle::ssa::ssastorage::{NodeData,SSAStorage};
use middle::ir::{MOpcode,MAddress,MArity};
use middle::ir_writer::{IRWriter};
use r2api::structs::{LRegInfo};
//use esil::parser::{Parse, Parser};
//use esil::lexer::{Token, Tokenizer};
// General Notes:
// for efficient impl of abstract store use "applicative dictionaries"
// (see end of Analyzing Memory Accesses in x86 Executables end of sec 3)
// each instruction (node in our case?) takes an abstract store as input
// and outputs one - this implementation does have to adapt this
// implement those 'transformers'
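// A minimal sketch of that "transformer" view (hypothetical trait, not part
// of this crate): each analyzed node would map an incoming AbstractStore to
// an outgoing one, and a fixpoint loop would thread the stores along the CFG.
//
//   trait StoreTransformer<N: Clone + Eq + ::std::hash::Hash> {
//       fn transform(&self, input: &AbstractStore<N>) -> AbstractStore<N>;
//   }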
fn perform_op(op: MOpcode, operands: Vec<i64>) -> i64 {
debug!("\t\t\tperform op: {:?}, {:?}", op, operands);
match op {
MOpcode::OpAdd => operands[0] + operands[1],
MOpcode::OpSub => operands[0] - operands[1],
//MOpcode::OpMul => operands[0] * operands[1], //FIXME panics on overflow
MOpcode::OpMul => {
let (res, overflow) = operands[0].overflowing_mul(operands[1]);
// Actually I do not know how to handle this correctly.
// (Even _whether_ this is to be handled.)
if overflow {warn!("Multiplication overflowed!")};
res
},
MOpcode::OpDiv => operands[0] / operands[1],
MOpcode::OpMod => operands[0] % operands[1],
MOpcode::OpAnd => operands[0] & operands[1],
MOpcode::OpOr => operands[0] | operands[1],
MOpcode::OpXor => operands[0] ^ operands[1],
MOpcode::OpNot => !operands[0],
//MOpcode::OpEq => operands[0] == operands[1],
//MOpcode::OpGt => operands[0] - operands[1],
//MOpcode::OpLt => operands[0] - operands[1],
MOpcode::OpLsl => operands[0] << operands[1],
MOpcode::OpLsr => operands[0] >> operands[1],
//MOpcode::OpNarrow(_) => ("narrow", MArity::Unary),
//MOpcode::OpWiden(_) => ("widen", MArity::Unary),
MOpcode::OpConst(c) => c as i64,
_ => 0,
}
}
//TODO get information about available registers/architecture/...
//from somewhere (ssa?)
fn is_stack_pointer(comment: &String) -> bool {
match comment.as_ref() {
"rsp" | "esp" => {true}
_ => {false}
}
}
fn is_base_pointer(comment: &String) -> bool {
match comment.as_ref() {
"rbp" | "ebp" => {true}
_ => {false}
}
}
fn is_gen_purpose_reg(comment: &String) -> bool {
match comment.as_ref() {
"rax" | "eax" |
"rbx" | "ebx" |
"rcx" | "ecx" |
"rdx" | "edx" |
"rdi" | "edi" |
"rsi" | "esi" |
"r11"
| "af" | "cf" | "of" | "pf" | "sf" | "tf" | "zf" | "ds"
=> {true}
_ => {false}
}
}
fn is_instruction_pointer(comment: &String) -> bool {
match comment.as_ref() {
"rip" | "eip" => {true}
_ => {false}
}
}
fn is_register(comment: &String) -> bool {
is_stack_pointer(comment)
| is_base_pointer(comment)
| is_gen_purpose_reg(comment)
| is_instruction_pointer(comment)
}
pub struct FnAnalyzer<RFn>
where RFn : RFunction + Clone
{
//rfn: RFn,
ssa: RFn::SSA,
a_store_fn: AbstractStore<<<RFn as RFunction>::SSA as SSA>::ValueRef>,
mem_reg_local: MemRegion,
stack_size: Option<u64>,
}
impl<RFn> FnAnalyzer<RFn>
where RFn: RFunction + Clone,
<RFn as RFunction>::SSA: Clone
{
pub fn from(rfn: RFn) -> FnAnalyzer<RFn> {
FnAnalyzer {
//rfn: rfn,
ssa: (*rfn.ssa_ref()).clone(),
a_store_fn: AbstractStore::new(),
mem_reg_local: MemRegion::new(MemRegionType::Local),
stack_size: None,
}
}
/// Print the SSA node as one expression
/// for example: load((rax + 42) - 23)
fn print_node_as_comp(&self,
node: <<RFn as RFunction>::SSA as SSA>::ValueRef)
-> String {
let op_type = self.ssa.get_node_data(&node).expect("No node data.").nt;
//debug!("print_node: {:?}", op_type);
match op_type {
NodeType::Op(opcode) => {
let ops = self.ssa.get_operands(&node);
match opcode.arity() {
MArity::Zero => {
match opcode {
MOpcode::OpConst(c) => format!("{}", c),
_ => format!("{}", opcode.to_string()),
}
},
MArity::Unary => {
format!("{}{}",
opcode.to_string(),
self.print_node_as_comp(ops[0]))
},
MArity::Binary => {
match opcode {
MOpcode::OpLoad => {
//format!("{}({}, {})",
// opcode.to_string(),
// self.print_node_as_comp(ops[0]),
// self.print_node_as_comp(ops[1]))
//},
format!("{}({})",
opcode.to_string(),
self.print_node_as_comp(ops[1]))
},
MOpcode::OpStore => {
// FIXME probably has wrong arity
//format!("{}({}, {}, {})",
// opcode.to_string(),
// self.print_node_as_comp(ops[0]),
// self.print_node_as_comp(ops[1]),
// self.print_node_as_comp(ops[2]))
format!("{}({}, {})",
opcode.to_string(),
self.print_node_as_comp(ops[1]),
self.print_node_as_comp(ops[2]))
},
_ => format!("({} {} {})",
self.print_node_as_comp(ops[0]),
opcode.to_string(),
self.print_node_as_comp(ops[1])),
}
},
MArity::Ternary => {
format!("{}({}, {}, {})",
opcode.to_string(),
self.print_node_as_comp(ops[0]),
self.print_node_as_comp(ops[1]),
self.print_node_as_comp(ops[2]))
},
}
},
NodeType::Comment(c) => format!("{}", c),
//NodeType::Phi => format!("(Phi)"),
NodeType::Phi => {
let mut ret = format!("Phi(");
let ops = self.ssa.get_operands(&node);
for op in ops {
ret = ret + &format!("{}, ", self.print_node_as_comp(op));
}
ret + &format!(")")
},
NodeType::Undefined => format!("(Undefined optype)"),
}
}
/// When a load operation is given, it tries to load
/// something from a given aloc.
/// This function traverses all store operations,
/// checks whether the given a-loc matches,
/// if so, return value stored to a-loc,
/// else , return uninitialized value
/// Takes as arguments the store node, and the a-loc
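/// Illustrative walk (hypothetical SSA values): given
///   store_b = store(store_a, X, 5)
/// a load(store_b, X) matches the stored a-loc and yields the abstract
/// value of 5, while load(store_b, Y) recurses into store_a; reaching
/// Comment("mem") yields an uninitialized StridedInterval_u.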
fn compute_loaded_value(&self,
node: <<RFn as RFunction>::SSA as SSA>::ValueRef,
(a_loc_base, a_loc_offs):
(A_Loc<<<RFn as RFunction>::SSA as SSA>::ValueRef>,
i64))
-> StridedInterval_u
{
debug!("compute_loaded_value({:?})", node);
debug!("\tcalc: {}", self.print_node_as_comp(node));
debug!("\ta-loc base: {}, offs: {}", a_loc_base, a_loc_offs);
let node_data = self.ssa.get_node_data(&node).expect("No node data.");
let op_type = node_data.nt;
let operands = self.ssa.get_operands(&node);
//debug!("\toperands: {:?}", operands);
//debug!("\t\t{:?} - {:?}", op_type, node);
//for op in &operands {
// let nd = self.ssa.get_node_data(&op).expect("No node data.");
// debug!("\t\t\t{:?}", nd.nt);
//}
match op_type {
NodeType::Comment(ref c) if c.eq("mem") => {
//debug!("\t\t\t\tNo matching a-loc found - ret uninitialized");
// We are loading from a memory region of which we don't know the value
StridedInterval_u::new()
},
NodeType::Op(MOpcode::OpStore) => {
let mem_state = operands[0];
let target_node = operands[1];
let value_node = operands[2];
let (a_loc_base_stored, a_loc_offs_stored) =
self.compute_a_loc(target_node)
.expect("No base a-loc to store operation");
debug!("\t\t\t\tcomparing to:\n\t\t\t{}, {}",
a_loc_base_stored, a_loc_offs_stored);
if (a_loc_base.clone(), a_loc_offs) ==
(a_loc_base_stored.clone(), a_loc_offs_stored) {
debug!("\t\t\t\tmatching - found storing location, get stored value");
self.compute_abstract_value(value_node)
} else {
debug!("\t\t\t\tNo matching a-loc - continuing search");
self.compute_loaded_value(mem_state, (a_loc_base, a_loc_offs))
}
},
NodeType::Phi => {
let loaded_val1 =
self.compute_loaded_value(operands[0],
(a_loc_base.clone(), a_loc_offs));
let loaded_val2 =
self.compute_loaded_value(operands[1],
(a_loc_base.clone(), a_loc_offs));
loaded_val1.join(loaded_val2)
},
_ => {
error!("unexpected op_type `{}'", op_type);
panic!()
} // This should never be called on something TODO: handle with .expect()
// that is not an OpStore or Comment("mem")
}
}
/// Computes an abstract value for a given node
/// uninitialized value for Comment("<reg>")
/// uninitialized value for Comment("mem")
/// c for Constant(c)
/// value at a-loc (+ offset) for Load from a-loc (+ offset)
/// stored value for Store
/// result of (arithmetic) operation
fn compute_abstract_value(&self,
node: <<RFn as RFunction>::SSA as SSA>::ValueRef)
-> StridedInterval_u
{
let node_data = self.ssa.get_node_data(&node).expect("No node data.");
let op_type = node_data.nt;
let operands = self.ssa.get_operands(&node);
debug!("compute_concrete_value({:?})", node);
debug!("\tcalc: {:?}", self.print_node_as_comp(node));
debug!("\toperands: {:?}", operands);
match op_type {
NodeType::Comment(ref c) if is_register(c) => {
StridedInterval_u::Undefined
},
NodeType::Op(MOpcode::OpConst(c)) => {
StridedInterval_u::from_const(c as i64)
},
//NodeType::Comment(ref c) if is_base_pointer(c) => {},
//NodeType::Comment(ref c) if is_gen_purpose_reg(c) => {},
NodeType::Op(MOpcode::OpStore) => {
self.compute_abstract_value(operands[2])
},
NodeType::Op(MOpcode::OpLoad) => {
let (a_loc_base, a_loc_offs) =
self.compute_a_loc(operands[1])
.expect("No base a-loc found");
self.compute_loaded_value(operands[0], (a_loc_base, a_loc_offs))
},
//TODO use process_op()
NodeType::Op(MOpcode::OpAdd) => {
self.compute_abstract_value(operands[0]) +
self.compute_abstract_value(operands[1])
},
NodeType::Op(MOpcode::OpSub) => {
self.compute_abstract_value(operands[0]) -
self.compute_abstract_value(operands[1])
},
NodeType::Op(MOpcode::OpMul) => {
self.compute_abstract_value(operands[0]) *
self.compute_abstract_value(operands[1])
},
NodeType::Op(MOpcode::OpDiv) => {
self.compute_abstract_value(operands[0]) /
self.compute_abstract_value(operands[1])
},
//NodeType::Op(opcode) => {
// match opcode {}
//},
_ => {<|fim▁hole|> }, // FIXME
}
}
/// Computes either underlying register or memory region plus offset
/// takes a node as argument
// TODO seems conceptually broken, a-loc already includes offset
// second ret value same as compute_abstract_value?
// currently returns stuff with ambigous meaning:
// on OpStore returns the content of the a-loc
// instead of a-loc offset (which is already included in the a-loc)
fn compute_a_loc(&self, node: <<RFn as RFunction>::SSA as SSA>::ValueRef)
-> Option<(A_Loc<<<RFn as RFunction>::SSA as SSA>::ValueRef>, i64)>
{
debug!("compute_a_loc({:?})", node);
debug!("\tcalc: {:?}", self.print_node_as_comp(node));
let node_data = self.ssa.get_node_data(&node).expect("No node data.");
let ValueType::Integer {width} = node_data.vt;
let op_type = node_data.nt;
let operands = self.ssa.get_operands(&node);
match op_type {
//NodeType::Comment(ref c) if is_stack_pointer(c) => {
// just found a stack pointer - nothing special
// TODO store information about stack in FnAnalyzer
// TODO what about use of basepointer?
// let vt = node_data.vt;
// debug!("Found Local a-loc - rsp (compute_a_loc), offset: 0");
// Some((A_Loc {
// addr: AbstractAddress::MemAddr {
// region: self.mem_reg_local.clone(),
// offset: 0, //TODO other initial value?
// },
// size: Some(width as i64),
// }, 0))
//},
//NodeType::Comment(ref c) if is_base_pointer(c) => {
NodeType::Comment(ref c) if is_register(c) => {
Some((A_Loc {
addr: AbstractAddress::Reg {
reg_name: c.clone(),
},
size: Some(width as i64),
}, 0))
},
NodeType::Comment(ref c) if c.eq("mem") => {
// TODO Probably not what we want, but easier to detect in the end
debug!("Found Global a-loc (compute_a_loc), offs: 0");
Some((A_Loc {
addr: AbstractAddress::MemAddr {
// TODO Have *one* global memregion
region: MemRegion::new(MemRegionType::Global),
// TODO 0 is probably not the right offset
offset: 0,
},
size: Some(width as i64),
}, 0))
},
NodeType::Comment(c) => None, // TODO
NodeType::Op(MOpcode::OpStore) => { //TODO
// should return set of all a-loc that have been written to
// up to 'now'
// value that will be stored in the a-loc
let mem_state = operands[0];
let target_node = operands[1];
let value_node = operands[2];
let value = self.compute_abstract_value(value_node).as_const();
// TODO we want to get value of a-loc, too
if let Some((a_loc_base, a_loc_offs)) =
self.compute_a_loc(target_node) {
if let A_Loc{
addr: AbstractAddress::Reg{ reg_name: reg_name},
..} = a_loc_base {
let mem_reg = if reg_name.eq("rip") { // what about "mem"?
MemRegion{region_type: MemRegionType::Global}
} else {
self.mem_reg_local.clone()
};
Some ((A_Loc {
addr: AbstractAddress::MemAddr {
region: mem_reg,
offset: a_loc_offs,
},
size: Some(width as i64),
}, value))
} else {None}
} else {None}
},
NodeType::Op(MOpcode::OpLoad) => {None}, // TODO
NodeType::Op(m_opcode) => {
if operands.len() >= 2 {
let update = self.compute_abstract_value(operands[1]).as_const();
if let Some((a_loc_base, a_loc_offs)) =
self.compute_a_loc(self.ssa.lhs(&node)) {
Some((a_loc_base,
perform_op(m_opcode, vec![a_loc_offs, update]))) //TODO
} else {None}
} else if operands.len() >= 1 {
let update = self.compute_abstract_value(operands[0]).as_const();
if let Some((a_loc_base, a_loc_offs)) =
self.compute_a_loc(self.ssa.lhs(&node)) {
Some((a_loc_base,
perform_op(m_opcode, vec![a_loc_offs, update]))) //TODO
} else {None}
} else {None}
},
NodeType::Phi => {
// TODO will this code ever be executed?
// For the case both a-locs are the same, simply return
let a_loc_off_a = self.compute_a_loc(operands[0]);
let a_loc_off_b = self.compute_a_loc(operands[1]);
if a_loc_off_a == a_loc_off_b {
a_loc_off_a
} else {
warn!("don't know which a-loc to return");
None // TODO what to do otherwise?
}
},
NodeType::Undefined => None,
}
}
/// Analyze a single function.
//TODO: rename -> analyze ?
pub fn analyze_rfn(mut self)
-> AbstractStore<<<RFn as RFunction>::SSA as SSA>::ValueRef>
{
// mem region for function
info!("analyzing function");
for node in self.ssa.nodes() {
debug!("analyzing node: {:?}", node);
if let Ok(node_data) = self.ssa.get_node_data(&node) {
debug!("\t\tnode data: {:?}", node_data);
debug!("\t\tcalc: {}", self.print_node_as_comp (node));
debug!("\t\tvalue: {}", self.compute_abstract_value (node));
}
if self.ssa.is_expr (&node) {
debug!("\t\tis expr: {:?}", node);
//debug!("\t\toperands: {:?}", self.ssa.get_operands(&node));
//debug!("\t\tnode data: {:?}", self.ssa.get_node_data(&node));
debug!("\t\tcalc: {}", self.print_node_as_comp (node));
debug!("\t\tvalue: {}", self.compute_abstract_value (node));
//debug!("\t\t#operands ({}):", self.ssa.get_operands(&node).len());
//for operand in self.ssa.get_operands (&node) {
// debug!("\t\t\t{:?}", self.ssa.get_node_data(&operand));
// //debug!("\t\t\tinvolved regs:");
// //for reg in involved_registers (self.ssa, operand) {
// // debug!("\t\t\t\t{:?}", reg);
// //}
//}
{ //compute a-loc for SSA node
let a_loc = A_Loc {
addr: AbstractAddress::new_ssa_node(node),
size: None, // FIXME
};
let content = self.compute_abstract_value(node);
debug!("Computed concrete value: {}", content);
self.a_store_fn.store.insert(a_loc.clone(), content);
}
debug!("Computed a-loc for SSA-node");
//compute a-loc/check for existing a-loc
if let Some((a_loc_base, a_loc_offs)) =
self.compute_a_loc(node) {
debug!("Computed a-loc");
let op_type = self.ssa.get_node_data(&node)
.expect("No node data.").nt;
//debug!("{:?}", op_type);
debug!("calc: {}", self.print_node_as_comp (node));
//compute value-set
let content = self.compute_abstract_value(node);
debug!("a-loc: {}", a_loc_base);
debug!("\tcontent: {}", content);
self.a_store_fn.update (a_loc_base.clone(), content.clone());
//update a-loc -> value-set
}
}
};
self.a_store_fn
}
}
/// A Value Set Analyzer (VSAnalyzer)
/// This analyzes access patterns in memory
// make generic over architecture
pub trait ValueSetAnalyzer {
type N: Hash + Eq + Clone;
fn analyze_value_sets_ssa (&self) -> AbstractStore<Self::N>;
//fn analyze_value_sets_esil (&'a mut self) -> AbstractStore<N>;
}
impl<'a, F: RFunction + Clone> ValueSetAnalyzer for RadecoModule<'a, F>
where <F as RFunction>::SSA: Clone
{
type N = <<F as RFunction>::SSA as SSA>::ValueRef;
fn analyze_value_sets_ssa (&self) -> AbstractStore<Self::N> {
let mut a_store = AbstractStore::new();
let mem_reg_global: MemRegion = MemRegion::new(MemRegionType::Global);
let fkns = self.functions.iter();
for (ref addr, rfn) in fkns {
if (!rfn.fn_name().eq("sym.main")) & (!rfn.fn_name().eq("main")) {
continue;
}
let fn_analyzer = FnAnalyzer::from((*rfn).clone());
let mut a_store_fn = fn_analyzer.analyze_rfn();
a_store.merge (&mut a_store_fn);
}
println!("Returning Abstract Store:");
for (a_loc, strid_interv) in &a_store.store {
if let A_Loc{addr: AbstractAddress::Node{node: node}, ..} = *a_loc {
continue;
};
println!("{:?}", a_loc);
println!("Strided Interval: {}", strid_interv);
}
a_store
}
//fn analyze_value_sets_esil (&'a mut self) -> AbstractStore {
//}
}
#[cfg(test)]
mod vsa {
use super::*;
use frontend::containers::RadecoModule;
use frontend::source::FileSource;
#[test]
#[ignore]
// Disable it temporarily.
fn exist_ssa() {
let mut fsource = FileSource::open(Some("./test_files/ct1_sccp_ex/ct1_sccp_ex"));
let rmod = RadecoModule::from(&mut fsource);
let a_store = rmod.analyze_value_sets_ssa ();
}
}<|fim▁end|> | warn!("Fallthrough");
StridedInterval_u::Undefined |
<|file_name|>image.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2010 Nicolas P. Rougier
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as np
import OpenGL.GL as gl
import texture, shader, colormap, color
class Image(object):
''' '''
def __init__(self, Z, format=None, cmap=colormap.IceAndFire, vmin=None, vmax=None,
interpolation='nearest', origin='lower', lighted=False,
gridsize=(0.0,0.0,0.0), elevation = 0.0):
''' Creates a texture from numpy array.
Parameters:
-----------
Z : numpy array
Z may be a float32 or uint8 array with following shapes:
* M
* MxN
* MxNx[1,2,3,4]
format: [None | 'A' | 'LA' | 'RGB' | 'RGBA']
Specify the texture format to use. Most of times it is possible to
find it automatically but there are a few cases where it not
possible to decide. For example an array with shape (M,3) can be
considered as 2D alpha texture of size (M,3) or a 1D RGB texture of
size (M,).
interpolation: 'nearest', 'bilinear' or 'bicubic'
Interpolation method.
vmin: scalar
Minimal representable value.
vmax: scalar
Maximal representable value.
origin: 'lower' or 'upper'
Place the [0,0] index of the array in the upper left or lower left
corner.
'''
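# Illustrative usage (assumes an active OpenGL context; the array shape
# and colormap choice below are arbitrary):
#   Z = np.random.uniform(0, 1, (256, 256)).astype(np.float32)
#   img = Image(Z, cmap=colormap.IceAndFire, interpolation='bicubic')
#   img.blit(0, 0, 512, 512)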
self._lut = None
self._interpolation = interpolation
self._lighted = lighted
self._gridsize = gridsize
self._elevation = elevation
self._texture = texture.Texture(Z)
self._origin = origin
self._vmin = vmin
self._vmax = vmax
self._data = Z
self.cmap = cmap # This takes care of actual build
self._shader = None
self.build()
def build(self):
''' Build shader '''
interpolation = self._interpolation
gridsize = self._gridsize
elevation = self._elevation
lighted = self._lighted
cmap = self._cmap
self._shader = None
# Source format is RGB or RGBA, no need of a colormap
if self._texture.src_format in [gl.GL_RGB,gl.GL_RGBA]:
if interpolation == 'bicubic':
self._shader = shader.Bicubic(False, lighted=lighted, gridsize=gridsize, elevation=elevation)
elif interpolation == 'bilinear':
self._shader = shader.Bilinear(False, lighted=lighted, gridsize=gridsize, elevation=elevation)
else:
self._shader = None
# Source format is not RGB or RGBA
else:
if cmap:
if interpolation == 'bicubic':
self._shader = shader.Bicubic(True, lighted=lighted, gridsize=gridsize, elevation=elevation)
elif interpolation == 'bilinear':
self._shader = shader.Bilinear(True, lighted=lighted, gridsize=gridsize, elevation=elevation)
else:
self._shader = shader.Nearest(True, lighted=lighted, gridsize=gridsize, elevation=elevation)
else:
if interpolation == 'bicubic':
self._shader = shader.Bicubic(False, lighted=lighted, gridsize=gridsize, elevation=elevation)
elif interpolation == 'bilinear':
self._shader = shader.Bilinear(False, lighted=lighted, gridsize=gridsize, elevation=elevation)
else:
self._shader = None
self.update()
@property
def shape(self):
''' Underlying array shape. '''
return self._data.shape
@property
def data(self):
''' Underlying array '''
return self._data
@property
def texture(self):
''' Underlying texture '''
return self._texture
@property
def shader(self):
''' Currently active shader '''
return self._shader
@property
def format(self):
''' Array representation format (string). '''
format = self._texture.src_format
if format == gl.GL_ALPHA:
return 'A'
elif format == gl.GL_LUMINANCE_ALPHA:
return 'LA'
elif format == gl.GL_RGB:
return 'RGB'
elif format == gl.GL_RGBA:
return 'RGBA'
def _get_cmap(self):
return self._cmap
def _set_cmap(self, cmap):
self._cmap = cmap
colors = self.cmap.LUT['rgb'][1:].flatten().view((np.float32,3))
self._lut = texture.Texture(colors)
cmap = property(_get_cmap, _set_cmap,
doc=''' Colormap to be used to represent the array. ''')
def _get_elevation(self):
return self._elevation
def _set_elevation(self, elevation):
# Do we need to re-build shader ?
if not (elevation*self._elevation):
self._elevation = elevation
self.build()
elif self._shader:
self._elevation = elevation
self._shader._elevation = elevation
elevation = property(_get_elevation, _set_elevation,
doc=''' Image elevation. ''')
def _get_origin(self):
return self._origin
def _set_origin(self, origin):
self._origin = origin
origin = property(_get_origin, _set_origin,
doc=''' Place the [0,0] index of the array in the upper
left or lower left corner. ''')
def _get_lighted(self):
return self._lighted
def _set_lighted(self, lighted):
self._lighted = lighted
self.build()
lighted = property(_get_lighted, _set_lighted,
doc=''' Indicate whether image is lighted. ''')
def _get_interpolation(self):
return self._interpolation
def _set_interpolation(self, interpolation):
self._interpolation = interpolation
self.build()
interpolation = property(_get_interpolation, _set_interpolation,
doc=''' Interpolation method. ''')
def _get_vmin(self):
return self._vmin
def _set_vmin(self, vmin):
self._vmin = vmin
vmin = property(_get_vmin, _set_vmin,
doc=''' Minimal representable value. ''')
def _get_vmax(self):
return self._vmax
def _set_vmax(self, vmax):
self._vmax = vmax
vmax = property(_get_vmax, _set_vmax, <|fim▁hole|> doc=''' Maximal representable value. ''')
def _get_gridsize(self):
return self._gridsize
def _get_gridsize_x(self):
return self._gridsize[0]
def _get_gridsize_y(self):
return self._gridsize[1]
def _get_gridsize_z(self):
return self._gridsize[2]
def _set_gridsize(self, gridsize):
# Do we need to re-build shader ?
x,y,z = gridsize
x,y,z = max(0,x),max(0,y),max(0,z)
_x,_y,_z = self._gridsize
self._gridsize = x,y,z
if not (x+y+z)*(_x+_y+_z) and (x+y+z)+(_x+_y+_z):
self.build()
elif self._shader:
self._shader._gridsize = x,y,z
def _set_gridsize_x(self, x):
self.gridsize = (max(0,x), self._gridsize[1], self._gridsize[2])
def _set_gridsize_y(self, y):
self.gridsize = (self._gridsize[0], max(0,y), self._gridsize[2])
def _set_gridsize_z(self, z):
self.gridsize = (self._gridsize[0], self._gridsize[1], max(0,z))
gridsize = property(_get_gridsize, _set_gridsize,
doc=''' Image grid (x,y,z). ''')
def update(self):
''' Data update. '''
if self.vmin is None:
vmin = self.data.min()
else:
vmin = self.vmin
if self.vmax is None:
vmax = self._data.max()
else:
vmax = self.vmax
if vmin == vmax:
vmin, vmax = 0, 1
if self._lut:
s = self._lut.width
self._texture.update(bias = 1.0/(s-1)-vmin*((s-3.1)/(s-1))/(vmax-vmin),
scale = ((s-3.1)/(s-1))/(vmax-vmin))
else:
self._texture.update(bias=-vmin/(vmax-vmin),scale=1.0/(vmax-vmin))
def blit(self, x, y, w, h):
''' Blit array onto active framebuffer. '''
if self._shader:
self._shader.bind(self.texture,self._lut)
if self.origin == 'lower':
t=0,1
else:
t=1,0
gl.glColor(1,1,1,1)
self._texture.blit(x,y,w,h,t=t)
if self._shader:
self._shader.unbind()<|fim▁end|> | |
<|file_name|>xfixes-selection-notify.py<|end_file_name|><|fim▁begin|><|fim▁hole|>#
# examples/xfixes-selection-notify.py -- demonstrate the XFIXES extension
# SelectionNotify event.
#
# Copyright (C) 2019
# Tony Crisci <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
# Python 2/3 compatibility.
from __future__ import print_function
import sys
import os
import time
# Change path so we find Xlib
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from Xlib.display import Display
from Xlib.ext import xfixes
def main(argv):
if len(sys.argv) != 2:
sys.exit('usage: {0} SELECTION\n\n'
'SELECTION is typically PRIMARY, SECONDARY or CLIPBOARD.\n'
.format(sys.argv[0]))
display = Display()
sel_name = sys.argv[1]
sel_atom = display.get_atom(sel_name)
if not display.has_extension('XFIXES'):
if display.query_extension('XFIXES') is None:
print('XFIXES extension not supported', file=sys.stderr)
return 1
xfixes_version = display.xfixes_query_version()
print('Found XFIXES version %s.%s' % (
xfixes_version.major_version,
xfixes_version.minor_version,
), file=sys.stderr)
screen = display.screen()
mask = xfixes.XFixesSetSelectionOwnerNotifyMask | \
xfixes.XFixesSelectionWindowDestroyNotifyMask | \
xfixes.XFixesSelectionClientCloseNotifyMask
display.xfixes_select_selection_input(screen.root, sel_atom, mask)
while True:
e = display.next_event()
print(e)
if (e.type, e.sub_code) == display.extension_event.SetSelectionOwnerNotify:
print('SetSelectionOwner: owner=0x{0:08x}'.format(e.owner.id))
elif (e.type, e.sub_code) == display.extension_event.SelectionWindowDestroyNotify:
print('SelectionWindowDestroy: owner=0x{0:08x}'.format(e.owner.id))
elif (e.type, e.sub_code) == display.extension_event.SelectionClientCloseNotify:
print('SelectionClientClose: owner=0x{0:08x}'.format(e.owner.id))
if __name__ == '__main__':
sys.exit(main(sys.argv))<|fim▁end|> | #!/usr/bin/python3 |
<|file_name|>change_publishers.py<|end_file_name|><|fim▁begin|>from casexml.apps.case.xform import get_case_ids_from_form
from corehq.apps.change_feed import topics
from corehq.apps.change_feed.producer import producer
from corehq.apps.change_feed import data_sources
from corehq.form_processor.interfaces.dbaccessors import FormAccessors, CaseAccessors
from corehq.form_processor.signals import sql_case_post_save
from pillowtop.feed.interface import ChangeMeta
def republish_all_changes_for_form(domain, form_id):
"""
Publishes all changes for the form and any touched cases/ledgers.
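Illustrative call (domain and form id are made up):
    republish_all_changes_for_form('my-domain', 'form-uuid')
re-sends the form change, one case change per touched case, and a
ledger change per unique ledger reference in the form.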
"""
form = FormAccessors(domain=domain).get_form(form_id)
publish_form_saved(form)
for case in _get_cases_from_form(domain, form):
publish_case_saved(case, send_post_save_signal=False)
_publish_ledgers_from_form(domain, form)
def publish_form_saved(form):
producer.send_change(topics.FORM_SQL, change_meta_from_sql_form(form))
def change_meta_from_sql_form(form):
return ChangeMeta(
document_id=form.form_id,
data_source_type=data_sources.FORM_SQL,
data_source_name='form-sql', # todo: this isn't really needed.
document_type=form.doc_type,
document_subtype=form.xmlns,
domain=form.domain,
is_deletion=form.is_deleted,
)
def publish_form_deleted(domain, form_id):
producer.send_change(topics.FORM_SQL, ChangeMeta(
document_id=form_id,
data_source_type=data_sources.FORM_SQL,
data_source_name='form-sql',
document_type='XFormInstance-Deleted',
domain=domain,
is_deletion=True,
))
def publish_case_saved(case, send_post_save_signal=True):
"""
Publish the change to kafka and run case post-save signals.
"""
producer.send_change(topics.CASE_SQL, change_meta_from_sql_case(case))
if send_post_save_signal:
sql_case_post_save.send(case.__class__, case=case)
def change_meta_from_sql_case(case):
return ChangeMeta(
document_id=case.case_id,
data_source_type=data_sources.CASE_SQL,
data_source_name='case-sql', # todo: this isn't really needed.
document_type='CommCareCase',
document_subtype=case.type,
domain=case.domain,
is_deletion=case.is_deleted,
)
def publish_case_deleted(domain, case_id):
producer.send_change(topics.CASE_SQL, ChangeMeta(
document_id=case_id,
data_source_type=data_sources.CASE_SQL,
data_source_name='case-sql', # todo: this isn't really needed.
document_type='CommCareCase-Deleted',
domain=domain,
is_deletion=True,
))
def publish_ledger_v2_saved(ledger_value):
producer.send_change(topics.LEDGER, change_meta_from_ledger_v2(ledger_value))
<|fim▁hole|> data_source_type=data_sources.LEDGER_V2,
data_source_name='ledger-v2', # todo: this isn't really needed.
domain=ledger_value.domain,
is_deletion=False,
)
def publish_ledger_v1_saved(stock_state):
producer.send_change(topics.LEDGER, change_meta_from_ledger_v1(stock_state))
def change_meta_from_ledger_v1(stock_state):
return ChangeMeta(
document_id=stock_state.pk,
data_source_type=data_sources.LEDGER_V1,
data_source_name='ledger-v1', # todo: this isn't really needed.
domain=stock_state.domain,
is_deletion=False,
)
def _get_cases_from_form(domain, form):
from corehq.form_processor.parsers.ledgers.form import get_case_ids_from_stock_transactions
case_ids = get_case_ids_from_form(form) | get_case_ids_from_stock_transactions(form)
return CaseAccessors(domain).get_cases(list(case_ids))
def _publish_ledgers_from_form(domain, form):
from corehq.form_processor.parsers.ledgers.form import get_all_stock_report_helpers_from_form
unique_references = {
transaction.ledger_reference
for helper in get_all_stock_report_helpers_from_form(form)
for transaction in helper.transactions
}
for ledger_reference in unique_references:
producer.send_change(topics.LEDGER, _change_meta_from_ledger_reference(domain, ledger_reference))
def _change_meta_from_ledger_reference(domain, ledger_reference):
return ChangeMeta(
document_id=ledger_reference.as_id(),
data_source_type=data_sources.LEDGER_V2,
data_source_name='ledger-v2', # todo: this isn't really needed.
domain=domain,
is_deletion=False,
)<|fim▁end|> |
def change_meta_from_ledger_v2(ledger_value):
return ChangeMeta(
document_id=ledger_value.ledger_reference.as_id(), |
<|file_name|>gcc.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use any::Any;
use libc::c_void;
use rt::libunwind as uw;
struct Exception {
uwe: uw::_Unwind_Exception,
cause: Option<Box<Any + Send + 'static>>,
}
pub unsafe fn panic(data: Box<Any + Send + 'static>) -> ! {
let exception: Box<_> = box Exception {
uwe: uw::_Unwind_Exception {
exception_class: rust_exception_class(),
exception_cleanup: exception_cleanup,
private: [0; uw::unwinder_private_data_size],
},
cause: Some(data),
};
let exception_param = Box::into_raw(exception) as *mut uw::_Unwind_Exception;
let error = uw::_Unwind_RaiseException(exception_param);
rtabort!("Could not unwind stack, error = {}", error as isize);
extern fn exception_cleanup(_unwind_code: uw::_Unwind_Reason_Code,
exception: *mut uw::_Unwind_Exception) {
rtdebug!("exception_cleanup()");
unsafe {
let _: Box<Exception> = Box::from_raw(exception as *mut Exception);
}
}
}
pub unsafe fn cleanup(ptr: *mut c_void) -> Box<Any + Send + 'static> {
let my_ep = ptr as *mut Exception;
rtdebug!("caught {}", (*my_ep).uwe.exception_class);
let cause = (*my_ep).cause.take();
uw::_Unwind_DeleteException(ptr as *mut _);
cause.unwrap()
}
// Rust's exception class identifier. This is used by personality routines to
// determine whether the exception was thrown by their own runtime.
fn rust_exception_class() -> uw::_Unwind_Exception_Class {
// M O Z \0 R U S T -- vendor, language
0x4d4f5a_00_52555354
}
// We could implement our personality routine in pure Rust, however exception
// info decoding is tedious. More importantly, personality routines have to
// handle various platform quirks, which are not fun to maintain. For this
// reason, we attempt to reuse personality routine of the C language:
// __gcc_personality_v0.
//
// Since C does not support exception catching, __gcc_personality_v0 simply
// always returns _URC_CONTINUE_UNWIND in search phase, and always returns
// _URC_INSTALL_CONTEXT (i.e. "invoke cleanup code") in cleanup phase.
//
// This is pretty close to Rust's exception handling approach, except that Rust
// does have a single "catch-all" handler at the bottom of each thread's stack.
// So we have two versions of the personality routine:
// - rust_eh_personality, used by all cleanup landing pads, which never catches,
// so the behavior of __gcc_personality_v0 is perfectly adequate there, and
// - rust_eh_personality_catch, used only by rust_try(), which always catches.
//
// Note, however, that for implementation simplicity, rust_eh_personality_catch
// lacks code to install a landing pad, so in order to obtain exception object
// pointer (which it needs to return upstream), rust_try() employs another trick:
// it calls into the nested rust_try_inner(), whose landing pad does not resume
// unwinds. Instead, it extracts the exception pointer and performs a "normal"
// return.
//
// See also: rt/rust_try.ll
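// Rough flow implied by the notes above (informal summary): panic() boxes
// an Exception and calls _Unwind_RaiseException; during the search phase
// only rust_eh_personality_catch (used by rust_try) answers
// _URC_HANDLER_FOUND, while ordinary frames answer _URC_INSTALL_CONTEXT so
// their cleanups run; rust_try_inner's landing pad then hands the exception
// pointer to cleanup(), which reclaims the Box and returns the cause.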
#[cfg(all(not(target_arch = "arm"),
not(all(windows, target_arch = "x86_64")),
not(test)))]
pub mod eabi {
use rt::libunwind as uw;
use libc::c_int;
extern "C" {
fn __gcc_personality_v0(version: c_int,
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context)
-> uw::_Unwind_Reason_Code;
}
#[lang="eh_personality"]
#[no_mangle] // referenced from rust_try.ll
#[allow(private_no_mangle_fns)]
extern fn rust_eh_personality(
version: c_int,
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
unsafe {
__gcc_personality_v0(version, actions, exception_class, ue_header,
context)
}
}
#[no_mangle] // referenced from rust_try.ll
pub extern "C" fn rust_eh_personality_catch(
_version: c_int,
actions: uw::_Unwind_Action,
_exception_class: uw::_Unwind_Exception_Class,
_ue_header: *mut uw::_Unwind_Exception,
_context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
uw::_URC_HANDLER_FOUND // catch!
}
else { // cleanup phase
uw::_URC_INSTALL_CONTEXT
}
}
}
<|fim▁hole|>#[cfg(all(target_os = "ios", target_arch = "arm", not(test)))]
pub mod eabi {
use rt::libunwind as uw;
use libc::c_int;
extern "C" {
fn __gcc_personality_sj0(version: c_int,
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context)
-> uw::_Unwind_Reason_Code;
}
#[lang="eh_personality"]
#[no_mangle] // referenced from rust_try.ll
pub extern "C" fn rust_eh_personality(
version: c_int,
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
unsafe {
__gcc_personality_sj0(version, actions, exception_class, ue_header,
context)
}
}
#[no_mangle] // referenced from rust_try.ll
pub extern "C" fn rust_eh_personality_catch(
_version: c_int,
actions: uw::_Unwind_Action,
_exception_class: uw::_Unwind_Exception_Class,
_ue_header: *mut uw::_Unwind_Exception,
_context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
uw::_URC_HANDLER_FOUND // catch!
}
else { // cleanup phase
unsafe {
__gcc_personality_sj0(_version, actions, _exception_class, _ue_header,
_context)
}
}
}
}
// ARM EHABI uses a slightly different personality routine signature,
// but otherwise works the same.
#[cfg(all(target_arch = "arm", not(target_os = "ios"), not(test)))]
pub mod eabi {
use rt::libunwind as uw;
use libc::c_int;
extern "C" {
fn __gcc_personality_v0(state: uw::_Unwind_State,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context)
-> uw::_Unwind_Reason_Code;
}
#[lang="eh_personality"]
#[no_mangle] // referenced from rust_try.ll
#[allow(private_no_mangle_fns)]
extern "C" fn rust_eh_personality(
state: uw::_Unwind_State,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
unsafe {
__gcc_personality_v0(state, ue_header, context)
}
}
#[no_mangle] // referenced from rust_try.ll
pub extern "C" fn rust_eh_personality_catch(
state: uw::_Unwind_State,
_ue_header: *mut uw::_Unwind_Exception,
_context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
if (state as c_int & uw::_US_ACTION_MASK as c_int)
== uw::_US_VIRTUAL_UNWIND_FRAME as c_int { // search phase
uw::_URC_HANDLER_FOUND // catch!
}
else { // cleanup phase
uw::_URC_INSTALL_CONTEXT
}
}
}
// Win64 SEH (see http://msdn.microsoft.com/en-us/library/1eyas8tf.aspx)
//
// This looks a bit convoluted because rather than implementing a native SEH
// handler, GCC reuses the same personality routine as for the other
// architectures by wrapping it with an "API translator" layer
// (_GCC_specific_handler).
#[cfg(all(windows, target_arch = "x86_64", not(test)))]
#[doc(hidden)]
#[allow(non_camel_case_types, non_snake_case)]
pub mod eabi {
pub use self::EXCEPTION_DISPOSITION::*;
use rt::libunwind as uw;
use libc::{c_void, c_int};
#[repr(C)]
pub struct EXCEPTION_RECORD;
#[repr(C)]
pub struct CONTEXT;
#[repr(C)]
pub struct DISPATCHER_CONTEXT;
#[repr(C)]
#[derive(Copy, Clone)]
pub enum EXCEPTION_DISPOSITION {
ExceptionContinueExecution,
ExceptionContinueSearch,
ExceptionNestedException,
ExceptionCollidedUnwind
}
type _Unwind_Personality_Fn =
extern "C" fn(
version: c_int,
actions: uw::_Unwind_Action,
exception_class: uw::_Unwind_Exception_Class,
ue_header: *mut uw::_Unwind_Exception,
context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code;
extern "C" {
fn __gcc_personality_seh0(
exceptionRecord: *mut EXCEPTION_RECORD,
establisherFrame: *mut c_void,
contextRecord: *mut CONTEXT,
dispatcherContext: *mut DISPATCHER_CONTEXT
) -> EXCEPTION_DISPOSITION;
fn _GCC_specific_handler(
exceptionRecord: *mut EXCEPTION_RECORD,
establisherFrame: *mut c_void,
contextRecord: *mut CONTEXT,
dispatcherContext: *mut DISPATCHER_CONTEXT,
personality: _Unwind_Personality_Fn
) -> EXCEPTION_DISPOSITION;
}
#[lang="eh_personality"]
#[no_mangle] // referenced from rust_try.ll
#[allow(private_no_mangle_fns)]
extern "C" fn rust_eh_personality(
exceptionRecord: *mut EXCEPTION_RECORD,
establisherFrame: *mut c_void,
contextRecord: *mut CONTEXT,
dispatcherContext: *mut DISPATCHER_CONTEXT
) -> EXCEPTION_DISPOSITION
{
unsafe {
__gcc_personality_seh0(exceptionRecord, establisherFrame,
contextRecord, dispatcherContext)
}
}
#[no_mangle] // referenced from rust_try.ll
pub extern "C" fn rust_eh_personality_catch(
exceptionRecord: *mut EXCEPTION_RECORD,
establisherFrame: *mut c_void,
contextRecord: *mut CONTEXT,
dispatcherContext: *mut DISPATCHER_CONTEXT
) -> EXCEPTION_DISPOSITION
{
extern "C" fn inner(
_version: c_int,
actions: uw::_Unwind_Action,
_exception_class: uw::_Unwind_Exception_Class,
_ue_header: *mut uw::_Unwind_Exception,
_context: *mut uw::_Unwind_Context
) -> uw::_Unwind_Reason_Code
{
if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
uw::_URC_HANDLER_FOUND // catch!
}
else { // cleanup phase
uw::_URC_INSTALL_CONTEXT
}
}
unsafe {
_GCC_specific_handler(exceptionRecord, establisherFrame,
contextRecord, dispatcherContext,
inner)
}
}
}<|fim▁end|> | // iOS on armv7 is using SjLj exceptions and therefore requires to use
// a specialized personality routine: __gcc_personality_sj0
|
<|file_name|>test_forms.py<|end_file_name|><|fim▁begin|>from django.contrib.auth.models import User
from django.core import mail
from django.test.client import RequestFactory
from mock import ANY, patch
from nose.tools import eq_, ok_
from test_utils import TestCase
from remo.dashboard.forms import EmailRepsForm
from remo.profiles.tests import FunctionalAreaFactory, UserFactory
class EmailRepsFormsTest(TestCase):
def setUp(self):
self.functional_area = FunctionalAreaFactory.create()
def test_form_tampered_functional_area(self):
"""Test form with tampered data in functional area field."""
data = {'subject': 'Test email subject',
'body': None,
'functional_area': 'Non existing functional area'}
form = EmailRepsForm(data=data)
ok_(not form.is_valid())
eq_(len(form.errors['functional_area']), 1)
@patch('remo.dashboard.forms.messages.success')
def test_send_mail(self, fake_messages):
"""Test EmailRepsForm email sending functionality."""
data = {'subject': 'Test email subject',
'body': 'Test email body',
'functional_area': self.functional_area.id}
form = EmailRepsForm(data=data)
ok_(form.is_valid())
area = self.functional_area
UserFactory.create_batch(20, userprofile__functional_areas=[area])
factory = RequestFactory()
request = factory.request()
request.user = UserFactory.create()
reps = User.objects.filter(userprofile__functional_areas__name=area)<|fim▁hole|>
form.send_email(request, reps)
eq_(len(mail.outbox), 20)
def format_name(user):
return '%s %s <%s>' % (user.first_name, user.last_name, user.email)
recipients = map(format_name, reps)
receivers = []
for i in range(0, len(mail.outbox)):
eq_(mail.outbox[i].subject, data['subject'])
eq_(mail.outbox[i].body, data['body'])
receivers.append(mail.outbox[i].to[0])
eq_(set(receivers), set(recipients))
fake_messages.assert_called_with(ANY, 'Email sent successfully.')<|fim▁end|> | |
<|file_name|>table.go<|end_file_name|><|fim▁begin|>package lua
type lValueArraySorter struct {
L *LState
Fn *LFunction
Values []LValue
}
func (lv lValueArraySorter) Len() int {
return len(lv.Values)
}
func (lv lValueArraySorter) Swap(i, j int) {
lv.Values[i], lv.Values[j] = lv.Values[j], lv.Values[i]
}
func (lv lValueArraySorter) Less(i, j int) bool {
if lv.Fn != nil {
lv.L.Push(lv.Fn)
lv.L.Push(lv.Values[i])
lv.L.Push(lv.Values[j])
lv.L.Call(2, 1)
return LVAsBool(lv.L.reg.Pop())
}
return lessThan(lv.L, lv.Values[i], lv.Values[j])
}
func newLTable(acap int, hcap int) *LTable {
if acap < 0 {
acap = 0
}
if hcap < 0 {
hcap = 0
}
tb := &LTable{
array: make([]LValue, 0, acap),
dict: make(map[LValue]LValue, hcap),
keys: nil,
k2i: nil,
Metatable: LNil,
}
return tb
}
func (tb *LTable) Len() int {
var prev LValue = LNil
for i := len(tb.array) - 1; i >= 0; i-- {
v := tb.array[i]
if prev == LNil && v != LNil {
return i + 1
}
prev = v
}
return 0
}
func (tb *LTable) Append(value LValue) {
tb.array = append(tb.array, value)
}
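// Insert inserts value at the 1-based position i in the array part, shifting
// the following elements up by one. Positions beyond the current end fall
// back to RawSetInt and non-positive positions fall back to RawSet.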
func (tb *LTable) Insert(i int, value LValue) {
if i > len(tb.array) {
tb.RawSetInt(i, value)
return
}
if i <= 0 {
tb.RawSet(LNumber(i), value)
return
}
i -= 1
tb.array = append(tb.array, LNil)
copy(tb.array[i+1:], tb.array[i:])
tb.array[i] = value
}
func (tb *LTable) MaxN() int {
for i := len(tb.array) - 1; i >= 0; i-- {
if tb.array[i] != LNil {
return i + 1 // array indices are 1-based
}
}
return 0
}
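// Remove removes the element at the 1-based position pos from the array part,
// shifting the following elements down. Positions past the end are a no-op;
// non-positive positions remove the last element.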
func (tb *LTable) Remove(pos int) {
i := pos - 1
larray := len(tb.array)
switch {
case i >= larray:
return
case i == larray-1 || i < 0:
tb.array = tb.array[:larray-1]
default:
copy(tb.array[i:], tb.array[i+1:])
tb.array[larray-1] = nil
tb.array = tb.array[:larray-1]
}
}
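// RawSet sets the value for key without invoking metamethods. Integer keys
// that qualify as array indices are stored in the array part (growing it with
// nils as needed); all other keys go to the hash part.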
func (tb *LTable) RawSet(key LValue, value LValue) {
switch v := key.(type) {
case LNumber:
if isArrayKey(v) {
index := int(v) - 1
alen := len(tb.array)
switch {
case index == alen:
tb.array = append(tb.array, value)
case index > alen:
for i := 0; i < (index - alen); i++ {
tb.array = append(tb.array, LNil)
}
tb.array = append(tb.array, value)
case index < alen:
tb.array[index] = value
}
return
}
}
tb.dict[key] = value
}
func (tb *LTable) RawSetInt(key int, value LValue) {
if key < 1 || key >= MaxArrayIndex {
tb.dict[LNumber(key)] = value
return
}
index := key - 1
alen := len(tb.array)
switch {
case index == alen:
tb.array = append(tb.array, value)
case index > alen:
for i := 0; i < (index - alen); i++ {
tb.array = append(tb.array, LNil)
}
tb.array = append(tb.array, value)
case index < alen:
tb.array[index] = value
}
}
func (tb *LTable) RawSetH(key LValue, value LValue) {
tb.dict[key] = value
}
func (tb *LTable) RawGet(key LValue) LValue {
switch v := key.(type) {
case LNumber:
if isArrayKey(v) {
index := int(v) - 1
if index >= len(tb.array) {
return LNil
}
return tb.array[index]
}
}
if v, ok := tb.dict[key]; ok {
return v
}
return LNil
}
func (tb *LTable) RawGetInt(key int) LValue {
index := int(key) - 1
if index >= len(tb.array) || index < 0 {
return LNil
}
return tb.array[index]
}
func (tb *LTable) RawGetH(key LValue) LValue {
if v, ok := tb.dict[key]; ok {
return v
}<|fim▁hole|>}
func (tb *LTable) ForEach(cb func(LValue, LValue)) {
for i, v := range tb.array {
if v != LNil {
cb(LNumber(i+1), v)
}
}
for k, v := range tb.dict {
if v != LNil {
cb(k, v)
}
}
}
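// Next returns the key/value pair that follows key when iterating over the
// table, walking the array part first and then the hash part through a
// cached key list. Passing LNil starts a new iteration; LNil, LNil is
// returned once the iteration is exhausted.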
func (tb *LTable) Next(key LValue) (LValue, LValue) {
// TODO: inefficient way
if key == LNil {
tb.keys = nil
tb.k2i = nil
key = LNumber(0)
}
if tb.keys == nil {
tb.keys = make([]LValue, len(tb.dict))
tb.k2i = make(map[LValue]int)
i := 0
for k, _ := range tb.dict {
tb.keys[i] = k
tb.k2i[k] = i
i++
}
}
if kv, ok := key.(LNumber); ok && isInteger(kv) && int(kv) >= 0 {
index := int(kv)
for ; index < len(tb.array); index++ {
if v := tb.array[index]; v != LNil {
return LNumber(index + 1), v
}
}
if index == len(tb.array) {
if len(tb.dict) == 0 {
tb.keys = nil
tb.k2i = nil
return LNil, LNil
}
key = tb.keys[0]
if v := tb.dict[key]; v != LNil {
return key, v
}
}
}
for i := tb.k2i[key] + 1; i < len(tb.dict); i++ {
key = tb.keys[i]
if v := tb.dict[key]; v != LNil {
return key, v
}
}
tb.keys = nil
tb.k2i = nil
return LNil, LNil
}<|fim▁end|> | return LNil |
<|file_name|>CommonMetricsFactory.java<|end_file_name|><|fim▁begin|>package org.cache2k.core;
/*
* #%L
* cache2k core implementation
* %%
* Copyright (C) 2000 - 2021 headissue GmbH, Munich
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import org.cache2k.core.api.CommonMetrics;
/**<|fim▁hole|> */
public interface CommonMetricsFactory {
CommonMetrics.Updater create(Parameters p);
interface Parameters {
boolean isDisabled();
boolean isPrecise();
}
}<|fim▁end|> | * @author Jens Wilke |
<|file_name|>agent.go<|end_file_name|><|fim▁begin|>/*
Blackbox agent is a client-side program which encrypts and decrypts files.
It requests encryption keys from the blackbox server when it needs to perform operations.
The agent never keeps the key.
*/
package main
import (
"flag"
"github.com/ksang/blackbox/agent/cli"
"github.com/ksang/blackbox/agent/operation"
"log"
)
func main() {
args, err := cli.Parse()
if err != nil {<|fim▁hole|> return
}
switch args.Mode {
case cli.MODE_ENCRYPT:
if len(args.FilePath) > 0 {
err = operation.AesEncryptFileAuto(args, args.FilePath)
} else if len(args.FolderPath) > 0 {
err = operation.AesEncryptFolderAuto(args, args.FolderPath)
}
if err != nil {
log.Fatal("FAILED:", err)
return
}
case cli.MODE_DECRYPT:
if len(args.FilePath) > 0 {
err = operation.AesDecryptFileAuto(args, args.FilePath)
} else if len(args.FolderPath) > 0 {
err = operation.AesDecryptFolderAuto(args, args.FolderPath)
}
if err != nil {
log.Fatal("FAILED:", err)
return
}
}
}<|fim▁end|> | log.Print(err)
flag.PrintDefaults() |
<|file_name|>SocketAppenderBuilderTest.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.logging.log4j.core.appender;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class SocketAppenderBuilderTest {
/**
* Tests https://issues.apache.org/jira/browse/LOG4J2-1620
*/
@Test<|fim▁hole|> public void testDefaultImmediateFlush() {
assertTrue(SocketAppender.newBuilder().isImmediateFlush(),
"Regression of LOG4J2-1620: default value for immediateFlush should be true");
}
}<|fim▁end|> | |
<|file_name|>0020_auto__add_field_question_order.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Question.order'
db.add_column(u'survey_question', 'order',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Question.order'
db.delete_column(u'survey_question', 'order')
models = {
u'survey.option': {
'Meta': {'object_name': 'Option'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '254'})
},
u'survey.page': {<|fim▁hole|> 'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"})
},
u'survey.question': {
'Meta': {'object_name': 'Question'},
'allow_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'modalQuestion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Option']", 'null': 'True', 'blank': 'True'}),
'options_from_previous_answer': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'options_json': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'randomize_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
'title': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
u'survey.respondant': {
'Meta': {'object_name': 'Respondant'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}),
'responses': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'responses'", 'symmetrical': 'False', 'to': u"orm['survey.Response']"}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}),
'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'bc967489-023c-46ce-b396-d209c8323fac'", 'max_length': '36', 'primary_key': 'True'})
},
u'survey.response': {
'Meta': {'object_name': 'Response'},
'answer': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']"}),
'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'})
},
u'survey.survey': {
'Meta': {'object_name': 'Survey'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Question']", 'null': 'True', 'through': u"orm['survey.Page']", 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '254'})
}
}
complete_apps = ['survey']<|fim▁end|> | 'Meta': {'ordering': "['order']", 'unique_together': "(('survey', 'order'),)", 'object_name': 'Page'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}), |
<|file_name|>test_interval.py<|end_file_name|><|fim▁begin|>import operator
import numpy as np
import pytest
from pandas.core.dtypes.common import is_list_like
import pandas as pd
from pandas import (
Categorical,
Index,
Interval,
IntervalIndex,
Period,
Series,
Timedelta,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
@pytest.fixture(
params=[
(Index([0, 2, 4, 4]), Index([1, 3, 5, 8])),
(Index([0.0, 1.0, 2.0, np.nan]), Index([1.0, 2.0, 3.0, np.nan])),
(
timedelta_range("0 days", periods=3).insert(4, pd.NaT),
timedelta_range("1 day", periods=3).insert(4, pd.NaT),
),
(
date_range("20170101", periods=3).insert(4, pd.NaT),
date_range("20170102", periods=3).insert(4, pd.NaT),
),
(
date_range("20170101", periods=3, tz="US/Eastern").insert(4, pd.NaT),
date_range("20170102", periods=3, tz="US/Eastern").insert(4, pd.NaT),
),
],
ids=lambda x: str(x[0].dtype),
)
def left_right_dtypes(request):
"""
Fixture for building an IntervalArray from various dtypes
"""
return request.param
@pytest.fixture
def array(left_right_dtypes):
"""
Fixture to generate an IntervalArray of various dtypes containing NA if possible
"""
left, right = left_right_dtypes
return IntervalArray.from_arrays(left, right)
def create_categorical_intervals(left, right, closed="right"):
return Categorical(IntervalIndex.from_arrays(left, right, closed))
def create_series_intervals(left, right, closed="right"):
return Series(IntervalArray.from_arrays(left, right, closed))
def create_series_categorical_intervals(left, right, closed="right"):
return Series(Categorical(IntervalIndex.from_arrays(left, right, closed)))
class TestComparison:
@pytest.fixture(params=[operator.eq, operator.ne])
def op(self, request):
return request.param
@pytest.fixture(
params=[
IntervalArray.from_arrays,
IntervalIndex.from_arrays,
create_categorical_intervals,
create_series_intervals,
create_series_categorical_intervals,
],
ids=[
"IntervalArray",
"IntervalIndex",
"Categorical[Interval]",
"Series[Interval]",
"Series[Categorical[Interval]]",
],
)
def interval_constructor(self, request):
"""
Fixture for all pandas native interval constructors.
To be used as the LHS of IntervalArray comparisons.
"""
return request.param
def elementwise_comparison(self, op, array, other):
"""
Helper that performs elementwise comparisons between `array` and `other`
"""
other = other if is_list_like(other) else [other] * len(array)
return np.array([op(x, y) for x, y in zip(array, other)])<|fim▁hole|> result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
# matches on a single endpoint but not both
other = Interval(array.left[0], array.right[1])
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
def test_compare_scalar_interval_mixed_closed(self, op, closed, other_closed):
array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed)
other = Interval(0, 1, closed=other_closed)
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
def test_compare_scalar_na(self, op, array, nulls_fixture, request):
result = op(array, nulls_fixture)
expected = self.elementwise_comparison(op, array, nulls_fixture)
if nulls_fixture is pd.NA and array.dtype != pd.IntervalDtype("int64"):
mark = pytest.mark.xfail(
reason="broken for non-integer IntervalArray; see GH 31882"
)
request.node.add_marker(mark)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
0,
1.0,
True,
"foo",
Timestamp("2017-01-01"),
Timestamp("2017-01-01", tz="US/Eastern"),
Timedelta("0 days"),
Period("2017-01-01", "D"),
],
)
def test_compare_scalar_other(self, op, array, other):
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
def test_compare_list_like_interval(
self, op, array, interval_constructor,
):
# same endpoints
other = interval_constructor(array.left, array.right)
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
# different endpoints
other = interval_constructor(array.left[::-1], array.right[::-1])
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
# all nan endpoints
other = interval_constructor([np.nan] * 4, [np.nan] * 4)
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
def test_compare_list_like_interval_mixed_closed(
self, op, interval_constructor, closed, other_closed
):
array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed)
other = interval_constructor(range(2), range(1, 3), closed=other_closed)
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
(
Interval(0, 1),
Interval(Timedelta("1 day"), Timedelta("2 days")),
Interval(4, 5, "both"),
Interval(10, 20, "neither"),
),
(0, 1.5, Timestamp("20170103"), np.nan),
(
Timestamp("20170102", tz="US/Eastern"),
Timedelta("2 days"),
"baz",
pd.NaT,
),
],
)
def test_compare_list_like_object(self, op, array, other):
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
def test_compare_list_like_nan(self, op, array, nulls_fixture, request):
other = [nulls_fixture] * 4
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
if nulls_fixture is pd.NA and array.dtype.subtype != "i8":
reason = "broken for non-integer IntervalArray; see GH 31882"
mark = pytest.mark.xfail(reason=reason)
request.node.add_marker(mark)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
np.arange(4, dtype="int64"),
np.arange(4, dtype="float64"),
date_range("2017-01-01", periods=4),
date_range("2017-01-01", periods=4, tz="US/Eastern"),
timedelta_range("0 days", periods=4),
period_range("2017-01-01", periods=4, freq="D"),
Categorical(list("abab")),
Categorical(date_range("2017-01-01", periods=4)),
pd.array(list("abcd")),
pd.array(["foo", 3.14, None, object()]),
],
ids=lambda x: str(x.dtype),
)
def test_compare_list_like_other(self, op, array, other):
result = op(array, other)
expected = self.elementwise_comparison(op, array, other)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("length", [1, 3, 5])
@pytest.mark.parametrize("other_constructor", [IntervalArray, list])
def test_compare_length_mismatch_errors(self, op, other_constructor, length):
array = IntervalArray.from_arrays(range(4), range(1, 5))
other = other_constructor([Interval(0, 1)] * length)
with pytest.raises(ValueError, match="Lengths must match to compare"):
op(array, other)
@pytest.mark.parametrize(
"constructor, expected_type, assert_func",
[
(IntervalIndex, np.array, tm.assert_numpy_array_equal),
(Series, Series, tm.assert_series_equal),
],
)
def test_index_series_compat(self, op, constructor, expected_type, assert_func):
# IntervalIndex/Series that rely on IntervalArray for comparisons
breaks = range(4)
index = constructor(IntervalIndex.from_breaks(breaks))
# scalar comparisons
other = index[0]
result = op(index, other)
expected = expected_type(self.elementwise_comparison(op, index, other))
assert_func(result, expected)
other = breaks[0]
result = op(index, other)
expected = expected_type(self.elementwise_comparison(op, index, other))
assert_func(result, expected)
# list-like comparisons
other = IntervalArray.from_breaks(breaks)
result = op(index, other)
expected = expected_type(self.elementwise_comparison(op, index, other))
assert_func(result, expected)
other = [index[0], breaks[0], "foo"]
result = op(index, other)
expected = expected_type(self.elementwise_comparison(op, index, other))
assert_func(result, expected)
@pytest.mark.parametrize("scalars", ["a", False, 1, 1.0, None])
def test_comparison_operations(self, scalars):
# GH #28981
expected = Series([False, False])
s = pd.Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval")
result = s == scalars
tm.assert_series_equal(result, expected)<|fim▁end|> |
def test_compare_scalar_interval(self, op, array):
# matches first interval
other = array[0] |
<|file_name|>user-resource.go<|end_file_name|><|fim▁begin|>package main
import (
"log"
"net/http"
restfulspec "github.com/emicklei/go-restful-openapi/v2"
"github.com/emicklei/go-restful/v3"
"github.com/go-openapi/spec"
)
type UserResource struct {
// normally one would use DAO (data access object)
users map[string]User
}
func (u UserResource) WebService() *restful.WebService {
ws := new(restful.WebService)
ws.
Path("/users").
Consumes(restful.MIME_XML, restful.MIME_JSON).
Produces(restful.MIME_JSON, restful.MIME_XML) // you can specify this per route as well
tags := []string{"users"}
<|fim▁hole|> Doc("get all users").
Metadata(restfulspec.KeyOpenAPITags, tags).
Writes([]User{}).
Returns(200, "OK", []User{}).
DefaultReturns("OK", []User{}))
ws.Route(ws.GET("/{user-id}").To(u.findUser).
// docs
Doc("get a user").
Param(ws.PathParameter("user-id", "identifier of the user").DataType("integer").DefaultValue("1")).
Metadata(restfulspec.KeyOpenAPITags, tags).
Writes(User{}). // on the response
Returns(200, "OK", User{}).
Returns(404, "Not Found", nil).
DefaultReturns("OK", User{}))
ws.Route(ws.PUT("/{user-id}").To(u.updateUser).
// docs
Doc("update a user").
Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
Metadata(restfulspec.KeyOpenAPITags, tags).
Reads(User{})) // from the request
ws.Route(ws.PUT("").To(u.createUser).
// docs
Doc("create a user").
Metadata(restfulspec.KeyOpenAPITags, tags).
Reads(User{})) // from the request
ws.Route(ws.DELETE("/{user-id}").To(u.removeUser).
// docs
Doc("delete a user").
Metadata(restfulspec.KeyOpenAPITags, tags).
Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")))
return ws
}
// GET http://localhost:8080/users
//
func (u UserResource) findAllUsers(request *restful.Request, response *restful.Response) {
list := []User{}
for _, each := range u.users {
list = append(list, each)
}
response.WriteEntity(list)
}
// GET http://localhost:8080/users/1
//
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
id := request.PathParameter("user-id")
usr := u.users[id]
if len(usr.ID) == 0 {
response.WriteErrorString(http.StatusNotFound, "User could not be found.")
} else {
response.WriteEntity(usr)
}
}
// PUT http://localhost:8080/users/1
// <User><Id>1</Id><Name>Melissa Raspberry</Name></User>
//
func (u *UserResource) updateUser(request *restful.Request, response *restful.Response) {
usr := new(User)
err := request.ReadEntity(&usr)
if err == nil {
u.users[usr.ID] = *usr
response.WriteEntity(usr)
} else {
response.WriteError(http.StatusInternalServerError, err)
}
}
// PUT http://localhost:8080/users/1
// <User><Id>1</Id><Name>Melissa</Name></User>
//
func (u *UserResource) createUser(request *restful.Request, response *restful.Response) {
usr := User{ID: request.PathParameter("user-id")}
err := request.ReadEntity(&usr)
if err == nil {
u.users[usr.ID] = usr
response.WriteHeaderAndEntity(http.StatusCreated, usr)
} else {
response.WriteError(http.StatusInternalServerError, err)
}
}
// DELETE http://localhost:8080/users/1
//
func (u *UserResource) removeUser(request *restful.Request, response *restful.Response) {
id := request.PathParameter("user-id")
delete(u.users, id)
}
func main() {
u := UserResource{map[string]User{}}
restful.DefaultContainer.Add(u.WebService())
config := restfulspec.Config{
WebServices: restful.RegisteredWebServices(), // you control what services are visible
APIPath: "/apidocs.json",
PostBuildSwaggerObjectHandler: enrichSwaggerObject}
restful.DefaultContainer.Add(restfulspec.NewOpenAPIService(config))
// Optionally, you can install the Swagger Service which provides a nice Web UI on your REST API
// You need to download the Swagger HTML5 assets and change the FilePath location in the config below.
// Open http://localhost:8080/apidocs/?url=http://localhost:8080/apidocs.json
http.Handle("/apidocs/", http.StripPrefix("/apidocs/", http.FileServer(http.Dir("/Users/emicklei/Projects/swagger-ui/dist"))))
// Optionally, you may need to enable CORS for the UI to work.
cors := restful.CrossOriginResourceSharing{
AllowedHeaders: []string{"Content-Type", "Accept"},
AllowedMethods: []string{"GET", "POST", "PUT", "DELETE"},
CookiesAllowed: false,
Container: restful.DefaultContainer}
restful.DefaultContainer.Filter(cors.Filter)
log.Printf("Get the API using http://localhost:8080/apidocs.json")
log.Printf("Open Swagger UI using http://localhost:8080/apidocs/?url=http://localhost:8080/apidocs.json")
log.Fatal(http.ListenAndServe(":8080", nil))
}
func enrichSwaggerObject(swo *spec.Swagger) {
swo.Info = &spec.Info{
InfoProps: spec.InfoProps{
Title: "UserService",
Description: "Resource for managing Users",
Contact: &spec.ContactInfo{
Name: "john",
Email: "[email protected]",
URL: "http://johndoe.org",
},
License: &spec.License{
Name: "MIT",
URL: "http://mit.org",
},
Version: "1.0.0",
},
}
swo.Tags = []spec.Tag{spec.Tag{TagProps: spec.TagProps{
Name: "users",
Description: "Managing users"}}}
}
// User is just a sample type
type User struct {
ID string `json:"id" description:"identifier of the user"`
Name string `json:"name" description:"name of the user" default:"john"`
Age int `json:"age" description:"age of the user" default:"21"`
}<|fim▁end|> | ws.Route(ws.GET("/").To(u.findAllUsers).
// docs |
<|file_name|>278_test_io.py<|end_file_name|><|fim▁begin|>"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as a attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import array
import random
import unittest
import weakref
import warnings
import abc
import signal
import errno
from itertools import cycle, count
from collections import deque
from UserList import UserList
from test import test_support as support
import contextlib
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
__metaclass__ = type
bytes = support.py3k_bytes
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with io.open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return MockRawIO.write(self, b) * 2
def read(self, n=None):
return MockRawIO.read(self, n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
MockRawIO.readinto(self, buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise IOError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super(MockFileIO, self).__init__(data)
def read(self, n=None):
res = super(MockFileIO, self).read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super(MockFileIO, self).readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(IOError, fp.read)
self.assertRaises(IOError, fp.readline)
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(IOError, fp.write, b"blah")
self.assertRaises(IOError, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(IOError, fp.write, "blah")
self.assertRaises(IOError, fp.writelines, ["blah\n"])
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OS X this test consumes large resources; it takes
# a long time to build the >2GB file and takes >2GB of disk space,
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1 // 0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1 // 0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertTrue(f.tell() > 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super(MyFileIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyFileIO, self).close()
def flush(self):
record.append(3)
super(MyFileIO, self).flush()
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super(MyIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super(MyIO, self).close()
def flush(self):
record.append(self.on_flush)
super(MyIO, self).flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array(b'i', range(10))
n = len(a.tostring())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def test_flush_error_on_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
def bad_flush():
raise IOError()
f.flush = bad_flush
self.assertRaises(IOError, f.close) # exception not swallowed
self.assertTrue(f.closed)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
support.gc_collect()
self.assertEqual(recorded, [])
def test_invalid_newline(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
support.gc_collect()
self.assertEqual(recorded, [])
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertTrue(wr() is None, wr)
class PyIOTest(IOTest):
test_array_writes = unittest.skip(
"len(array.array) returns number of elements rather than bytelength"
)(IOTest.test_array_writes)
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
@unittest.skip('test having existential crisis')
def test_no_fileno(self):
# XXX will we always have fileno() function? If so, kill
# this test. Else, write it.
pass
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 3)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super(MyBufferedIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyBufferedIO, self).close()
def flush(self):
record.append(3)
super(MyBufferedIO, self).flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name=u'dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
def test_flush_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError('flush')
def bad_close():
raise IOError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(IOError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises((AttributeError, TypeError)):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so that
# checking this is not so easy.
self.assertRaises(IOError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends into gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents,
b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
self.assertRaises(IOError, bufio.write, b"abcdef")
def test_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise IOError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
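# BufferedRWPair couples an independent reader and an independent writer (e.g. the
# two ends of a pipe or socket); unlike BufferedRandom it is never seekable.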
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = bytearray(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data, b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
pair.write(b"def")
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
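# BufferedRandom wraps a single seekable raw stream and has to keep its internal
# read and write buffers consistent, hence the mixed read/write/seek tests below.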
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if it needs so
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest,
BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are variable-length (period-terminated).
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
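# e.g. right after reset() (i == o == 1) the state is (b'', 0); with i == 2 and
# o == 6 pending it would be (<buffer>, 307), since (2 ^ 1) * 100 + (6 ^ 1) == 307.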
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == '.':
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
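# TextIOWrapper tests: encoding/decoding, newline translation, the opaque
# seek/tell cookies, destructor behaviour and error handling.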
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin1", newline="\r\n")
self.assertEqual(t.encoding, "latin1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf8", line_buffering=True)
self.assertEqual(t.encoding, "utf8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=u'dummy' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf8")
self.assertEqual(t.encoding, "utf8")
t = self.TextIOWrapper(b)
self.assertTrue(t.encoding is not None)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(True)),
("", testdata.decode("ascii").splitlines(True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super(MyTextIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyTextIO, self).close()
def flush(self):
record.append(3)
super(MyTextIO, self).flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(IOError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
f = self.open(support.TESTFN, "wb")
f.write(line*2)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
s = f.read(prefix_size)
self.assertEqual(s, prefix.decode("ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
f = self.open(support.TESTFN, "wb")
f.write(data)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
#Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(IOError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=lambda n=x: run(n))
for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02)
event.set()
for t in threads:
t.join()
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
def bad_flush():
raise IOError()
txt.flush = bad_flush
self.assertRaises(IOError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises((AttributeError, TypeError)):
txt.buffer = buf
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
class NonbytesStream(self.StringIO):
read1 = self.StringIO.read
class NonbytesStream(self.StringIO):
read1 = self.StringIO.read
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.read(1)
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.readline()
t = self.TextIOWrapper(NonbytesStream('a'))
self.assertEqual(t.read(), u'a')
def test_illegal_decoder(self):
# Issue #17106
# Crash when decoder returns non-string
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read(1)
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.readline()
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read()
class CTextIOWrapperTest(TextIOWrapperTest):
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.read)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
maybeRaises = unittest.TestCase.assertRaises
class PyTextIOWrapperTest(TextIOWrapperTest):
@contextlib.contextmanager
def maybeRaises(self, *args, **kwds):
yield
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(b))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
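# Miscellaneous behaviour shared by both implementations: __all__ contents,
# file mode/name attributes, operations on closed files, BlockingIOError,
# ABC registration and non-blocking pipe writes.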
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertTrue(obj is not None, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()<|fim▁hole|>
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
self.assertRaises(TypeError, self.BlockingIOError)
self.assertRaises(TypeError, self.BlockingIOError, 1)
self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4)
self.assertRaises(TypeError, self.BlockingIOError, 1, "", None)
b = self.BlockingIOError(1, "")
self.assertEqual(b.characters_written, 0)
class C(unicode):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
def _set_non_blocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
self.assertNotEqual(flags, -1)
res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.assertEqual(res, 0)
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
self._set_non_blocking(r)
self._set_non_blocking(w)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertTrue(sent == received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
class CMiscIOTest(MiscIOTest):
io = io
class PyMiscIOTest(MiscIOTest):
io = pyio
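# Signal-handling tests: partial writes interrupted by SIGALRM, reentrant writes
# from a signal handler, and EINTR retry behaviour for buffered and text I/O.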
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
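# Deliberately raise ZeroDivisionError from the handler so the tests below can
# assert that the exception propagates out of the interrupted I/O call.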
1 // 0
@unittest.skipUnless(threading, 'Threading required for this test.')
@unittest.skipIf(sys.platform in ('freebsd5', 'freebsd6', 'freebsd7'),
'issue #12429: skip test on FreeBSD <= 7')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
signal.alarm(1)
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
self.assertRaises(ZeroDivisionError,
wio.write, item * (support.PIPE_MAX_SIZE // len(item) + 1))
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1//0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
def _read():
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
self.assertEqual(N, wio.write(item * N))
wio.flush()
write_finished = True
t.join()
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def test_main():
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = dict((name, getattr(io, name)) for name in all_members)
py_io_ns = dict((name, getattr(pyio, name)) for name in all_members)
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()<|fim▁end|> | |
<|file_name|>storage-mocked.js<|end_file_name|><|fim▁begin|>/**
* Convenient Redis Storage mock for testing purposes
*/
var util = require ('util');
function StorageMocked(data){
data = data || {};
this.currentOutage = data.currentOutage;
}
exports = module.exports = StorageMocked;
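// Usage sketch (the `service` argument and the outage shape mirror whatever the
// real Redis-backed storage expects; both are simply passed through here):
//   var storage = new StorageMocked();
//   storage.startOutage(service, {timestamp: Date.now()}, function (err) {
//     storage.getCurrentOutage(service, function (err, outage) {
//       // outage is the same object that was passed to startOutage
//     });
//   });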
StorageMocked.prototype.startOutage = function (service, outageData, callback) {
this.currentOutage = outageData;
setImmediate(function(){
callback(null);
});
};
StorageMocked.prototype.getCurrentOutage = function (service, callback) {
var self = this;
setImmediate(function(){
callback(null, self.currentOutage);
});
};
StorageMocked.prototype.saveLatency = function (service, timestamp, latency, callback) {
setImmediate(function(){
callback(null);
});
};
StorageMocked.prototype.archiveCurrentOutageIfExists = function (service, callback) {
var self = this;
setImmediate(function(){
callback(null, self.currentOutage);<|fim▁hole|> });
};
StorageMocked.prototype.flush_database = function (callback){
setImmediate(function(){
callback(null);
});
};<|fim▁end|> | |
<|file_name|>search.js<|end_file_name|><|fim▁begin|>'use strict';
const assert = require('assert');
const context = require('../helpers/context');
describe('ctx.search=', () => {
it('should replace the search', () => {
const ctx = context({ url: '/store/shoes' });
ctx.search = '?page=2&color=blue';
assert.equal(ctx.url, '/store/shoes?page=2&color=blue');
assert.equal(ctx.search, '?page=2&color=blue');
});
it('should update ctx.querystring and ctx.query', () => {
const ctx = context({ url: '/store/shoes' });
ctx.search = '?page=2&color=blue';
assert.equal(ctx.url, '/store/shoes?page=2&color=blue');
assert.equal(ctx.querystring, 'page=2&color=blue');
assert.equal(ctx.query.page, '2');
assert.equal(ctx.query.color, 'blue');
});
it('should change .url but not .originalUrl', () => {
const ctx = context({ url: '/store/shoes' });
ctx.search = '?page=2&color=blue';
assert.equal(ctx.url, '/store/shoes?page=2&color=blue');
assert.equal(ctx.originalUrl, '/store/shoes');
assert.equal(ctx.request.originalUrl, '/store/shoes');<|fim▁hole|>
describe('when missing', () => {
it('should return ""', () => {
const ctx = context({ url: '/store/shoes' });
assert.equal(ctx.search, '');
});
});
});<|fim▁end|> | }); |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>use std::env;
use std::process;
extern crate concurrent_prime_sieve;
use concurrent_prime_sieve::filter::prime_filter;
mod draw_spiral;
fn help() -> !{
println!("Can only use up to 2 environment variables. E.G.");
println!(">>>cargo run <size> <write_to_path>");
process::exit(0);
}
fn get_size_from_env(env_args: &Vec<String>, default_size: usize) -> usize {
match env_args[1].trim().parse(){
Ok(n) => n,
Err(_) => default_size,
}
}
fn get_env(default_size: usize) -> (String, usize){
let env_args: Vec<String> = env::args().collect();
let (path, size) = match env_args.len(){
1 => ("", default_size),
2 => ("", get_size_from_env(&env_args, default_size)),
3 => (env_args[2].as_str(), get_size_from_env(&env_args, default_size)),
_ => help(),
};
(String::from(path), size)
}
fn main() {
let (path, size) = get_env(200);
// println!("{}", path);
let num_primes = size*size + 1;<|fim▁hole|> // println!("{} is prime: {}", i, j);
// }
}<|fim▁end|> | let is_prime_iter = prime_filter(num_primes);
draw_spiral::prime_filter_to_spiral_png(size, is_prime_iter, path);
// for (i, j) in is_prime_iter.iter().enumerate(){ |
<|file_name|>_001570_dotProdSparseVectors.cpp<|end_file_name|><|fim▁begin|>class SparseVector {
unordered_map<int, int> repr;
int size = 0;
public:
SparseVector(vector<int> &nums) {
size = nums.size();
for (int i=0; i<nums.size(); i++) {
if (nums[i] == 0) { continue; }
repr[i] = nums[i];
}
}
// Return the dotProduct of two sparse vectors
int dotProduct(SparseVector& vec) {
if (size != vec.size) {return 0;} // incompatible
int dp=0;
for (const auto& kv : vec.repr) {
if (repr.find(kv.first) == repr.end()) continue;
dp += kv.second * repr[kv.first];
}
return dp;
}
};
<|fim▁hole|><|fim▁end|> | // Your SparseVector object will be instantiated and called as such:
// SparseVector v1(nums1);
// SparseVector v2(nums2);
// int ans = v1.dotProduct(v2); |
<|file_name|>views.py<|end_file_name|><|fim▁begin|>from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.core.paginator import Paginator
from uwcs_website.choob.models import *
def quotes_page(request):
quoters = map(lambda q:q[0],QuoteObject.objects.all().values_list('quoter').distinct())
quoted = map(lambda q:q[0],QuoteLine.objects.all().values_list('nick').distinct())
return render_to_response('choob/quotes.html',{
'breadcrumbs': [('/','home'),('/irc/','irc')],
'user':request.user,
'quoters':quoters,
'quoted':quoted,
})
PER_PAGE = 20
def quotes_f(request,page_num,url,f):
'''
Generic quotes controller for making lists of quotes
type(f) = String -> [QuoteObject]
'''
if request.method == 'POST':
val = request.POST['val']
paginator = Paginator(f(val),PER_PAGE)
return render_to_response('choob/quote_list.html',{
'breadcrumbs': [('/','home'),('/irc/','irc')],
'user':request.user,
'page':paginator.page(page_num),
'value':val,
'url':url,
})
else:
return HttpResponseRedirect('/irc/all_quotes/')
def all_quotes(request,page_num):
paginator = Paginator(QuoteObject.objects.all(),PER_PAGE)
return render_to_response('choob/quote_list.html',{<|fim▁hole|> })
# this is clearly not idiomatic in languages without currying
# perhaps someone can suggest something else
def quotes_by(request,page):
return quotes_f(request,page,'quotes_by',
lambda n:QuoteObject.objects.filter(quoter__exact=n))
def quotes_from(request,page):
return quotes_f(request,page,'quotes_from',
lambda n:QuoteObject.objects.filter(quoteline__nick__exact=n))
def quotes_with(request,page):
return quotes_f(request,page,'quotes_with',
lambda v:QuoteObject.objects.filter(quoteline__message__contains=v))<|fim▁end|> | 'breadcrumbs': [('/','home'),('/irc/','irc'),('/irc/all_quotes/1/','all')],
'user':request.user,
'page':paginator.page(page_num), |
<|file_name|>live_timers.rs<|end_file_name|><|fim▁begin|>use std::collections::HashMap;
use platform::time::time_now;
use super::StartTime;
use super::Timing;
#[derive(Debug, Clone, PartialEq)]
pub struct LiveTimers {
timers: HashMap<String, StartTime>,
}
impl LiveTimers {
pub fn new() -> LiveTimers {
LiveTimers { timers: HashMap::new() }
}
pub fn get_timers(&self) -> &HashMap<String, StartTime> {
&self.timers
}
pub fn start(&mut self, name: &str) -> StartTime {
let start_time = time_now();
self.timers.insert(name.to_string(), start_time.clone());
start_time
}
pub fn stop(&mut self, name: &str) -> Timing {
let stop_time = time_now();
let opt = self.timers.remove(name);
if opt.is_none() {
panic!("Tried to stop non-live timer: {:?}", name);
}
<|fim▁hole|> Timing::new(name, start_time, duration)
}
}<|fim▁end|> | let start_time = opt.unwrap();
let duration = stop_time - start_time;
|
<|file_name|>post_list_test.go<|end_file_name|><|fim▁begin|>// Copyright (c) 2015 Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
package model
import (
"strings"
"testing"
)
func TestPostListJson(t *testing.T) {
pl := PostList{}
p1 := &Post{Id: NewId(), Message: NewId()}
pl.AddPost(p1)
p2 := &Post{Id: NewId(), Message: NewId()}
pl.AddPost(p2)
pl.AddOrder(p1.Id)<|fim▁hole|>
json := pl.ToJson()
rpl := PostListFromJson(strings.NewReader(json))
if rpl.Posts[p1.Id].Message != p1.Message {
t.Fatal("failed to serialize")
}
if rpl.Posts[p2.Id].Message != p2.Message {
t.Fatal("failed to serialize")
}
if rpl.Order[1] != p2.Id {
t.Fatal("failed to serialize")
}
}<|fim▁end|> | pl.AddOrder(p2.Id) |
<|file_name|>block.go<|end_file_name|><|fim▁begin|>// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package types contains data types related to Ethereum consensus.
package types
import (
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"math/big"
"sort"
"sync/atomic"
"time"
"github.com/kejace/go-ethereum/common"
"github.com/kejace/go-ethereum/crypto/sha3"
"github.com/kejace/go-ethereum/rlp"
)
var (
EmptyRootHash = DeriveSha(Transactions{})
EmptyUncleHash = CalcUncleHash(nil)
)
var (
errMissingHeaderMixDigest = errors.New("missing mixHash in JSON block header")
errMissingHeaderFields = errors.New("missing required JSON block header fields")
errBadNonceSize = errors.New("invalid block nonce size, want 8 bytes")
)
// A BlockNonce is a 64-bit hash which proves (combined with the
// mix-hash) that a sufficient amount of computation has been carried
// out on a block.
type BlockNonce [8]byte
// EncodeNonce converts the given integer to a block nonce.
func EncodeNonce(i uint64) BlockNonce {
var n BlockNonce
binary.BigEndian.PutUint64(n[:], i)
return n
}
// Uint64 returns the integer value of a block nonce.
func (n BlockNonce) Uint64() uint64 {
return binary.BigEndian.Uint64(n[:])
}
// MarshalJSON implements json.Marshaler
func (n BlockNonce) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"0x%x"`, n)), nil
}
// UnmarshalJSON implements json.Unmarshaler
func (n *BlockNonce) UnmarshalJSON(input []byte) error {
var b hexBytes
if err := b.UnmarshalJSON(input); err != nil {
return err
}
if len(b) != 8 {
return errBadNonceSize
}
copy((*n)[:], b)
return nil
}
// Header represents a block header in the Ethereum blockchain.
type Header struct {
ParentHash common.Hash // Hash to the previous block
UncleHash common.Hash // Uncles of this block
Coinbase common.Address // The coin base address
Root common.Hash // Block Trie state
TxHash common.Hash // Tx sha
ReceiptHash common.Hash // Receipt sha
Bloom Bloom // Bloom
Difficulty *big.Int // Difficulty for the current block
Number *big.Int // The block number
GasLimit *big.Int // Gas limit
GasUsed *big.Int // Gas used
Time *big.Int // Creation time
Extra []byte // Extra data
MixDigest common.Hash // for quick difficulty verification
Nonce BlockNonce
}
type jsonHeader struct {
ParentHash *common.Hash `json:"parentHash"`<|fim▁hole|> Coinbase *common.Address `json:"miner"`
Root *common.Hash `json:"stateRoot"`
TxHash *common.Hash `json:"transactionsRoot"`
ReceiptHash *common.Hash `json:"receiptsRoot"`
Bloom *Bloom `json:"logsBloom"`
Difficulty *hexBig `json:"difficulty"`
Number *hexBig `json:"number"`
GasLimit *hexBig `json:"gasLimit"`
GasUsed *hexBig `json:"gasUsed"`
Time *hexBig `json:"timestamp"`
Extra *hexBytes `json:"extraData"`
MixDigest *common.Hash `json:"mixHash"`
Nonce *BlockNonce `json:"nonce"`
}
// Hash returns the block hash of the header, which is simply the keccak256 hash of its
// RLP encoding.
func (h *Header) Hash() common.Hash {
return rlpHash(h)
}
// HashNoNonce returns the hash which is used as input for the proof-of-work search.
func (h *Header) HashNoNonce() common.Hash {
return rlpHash([]interface{}{
h.ParentHash,
h.UncleHash,
h.Coinbase,
h.Root,
h.TxHash,
h.ReceiptHash,
h.Bloom,
h.Difficulty,
h.Number,
h.GasLimit,
h.GasUsed,
h.Time,
h.Extra,
})
}
// MarshalJSON encodes headers into the web3 RPC response block format.
func (h *Header) MarshalJSON() ([]byte, error) {
return json.Marshal(&jsonHeader{
ParentHash: &h.ParentHash,
UncleHash: &h.UncleHash,
Coinbase: &h.Coinbase,
Root: &h.Root,
TxHash: &h.TxHash,
ReceiptHash: &h.ReceiptHash,
Bloom: &h.Bloom,
Difficulty: (*hexBig)(h.Difficulty),
Number: (*hexBig)(h.Number),
GasLimit: (*hexBig)(h.GasLimit),
GasUsed: (*hexBig)(h.GasUsed),
Time: (*hexBig)(h.Time),
Extra: (*hexBytes)(&h.Extra),
MixDigest: &h.MixDigest,
Nonce: &h.Nonce,
})
}
// UnmarshalJSON decodes headers from the web3 RPC response block format.
func (h *Header) UnmarshalJSON(input []byte) error {
var dec jsonHeader
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
// Ensure that all fields are set. MixDigest is checked separately because
// it is a recent addition to the spec (as of August 2016) and older RPC server
// implementations might not provide it.
if dec.MixDigest == nil {
return errMissingHeaderMixDigest
}
if dec.ParentHash == nil || dec.UncleHash == nil || dec.Coinbase == nil ||
dec.Root == nil || dec.TxHash == nil || dec.ReceiptHash == nil ||
dec.Bloom == nil || dec.Difficulty == nil || dec.Number == nil ||
dec.GasLimit == nil || dec.GasUsed == nil || dec.Time == nil ||
dec.Extra == nil || dec.Nonce == nil {
return errMissingHeaderFields
}
// Assign all values.
h.ParentHash = *dec.ParentHash
h.UncleHash = *dec.UncleHash
h.Coinbase = *dec.Coinbase
h.Root = *dec.Root
h.TxHash = *dec.TxHash
h.ReceiptHash = *dec.ReceiptHash
h.Bloom = *dec.Bloom
h.Difficulty = (*big.Int)(dec.Difficulty)
h.Number = (*big.Int)(dec.Number)
h.GasLimit = (*big.Int)(dec.GasLimit)
h.GasUsed = (*big.Int)(dec.GasUsed)
h.Time = (*big.Int)(dec.Time)
h.Extra = *dec.Extra
h.MixDigest = *dec.MixDigest
h.Nonce = *dec.Nonce
return nil
}
func rlpHash(x interface{}) (h common.Hash) {
hw := sha3.NewKeccak256()
rlp.Encode(hw, x)
hw.Sum(h[:0])
return h
}
// Body is a simple (mutable, non-safe) data container for storing and moving
// a block's data contents (transactions and uncles) together.
type Body struct {
Transactions []*Transaction
Uncles []*Header
}
// Block represents an entire block in the Ethereum blockchain.
type Block struct {
header *Header
uncles []*Header
transactions Transactions
// caches
hash atomic.Value
size atomic.Value
// Td is used by package core to store the total difficulty
// of the chain up to and including the block.
td *big.Int
// These fields are used by package eth to track
// inter-peer block relay.
ReceivedAt time.Time
ReceivedFrom interface{}
}
// DeprecatedTd is an old relic for extracting the TD of a block. It is in the
// code solely to facilitate upgrading the database from the old format to the
// new, after which it should be deleted. Do not use!
func (b *Block) DeprecatedTd() *big.Int {
return b.td
}
// [deprecated by eth/63]
// StorageBlock defines the RLP encoding of a Block stored in the
// state database. The StorageBlock encoding contains fields that
// would otherwise need to be recomputed.
type StorageBlock Block
// "external" block encoding. used for eth protocol, etc.
type extblock struct {
Header *Header
Txs []*Transaction
Uncles []*Header
}
// [deprecated by eth/63]
// "storage" block encoding. used for database.
type storageblock struct {
Header *Header
Txs []*Transaction
Uncles []*Header
TD *big.Int
}
// NewBlock creates a new block. The input data is copied,
// changes to header and to the field values will not affect the
// block.
//
// The values of TxHash, UncleHash, ReceiptHash and Bloom in header
// are ignored and set to values derived from the given txs, uncles
// and receipts.
func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt) *Block {
b := &Block{header: CopyHeader(header), td: new(big.Int)}
// TODO: panic if len(txs) != len(receipts)
if len(txs) == 0 {
b.header.TxHash = EmptyRootHash
} else {
b.header.TxHash = DeriveSha(Transactions(txs))
b.transactions = make(Transactions, len(txs))
copy(b.transactions, txs)
}
if len(receipts) == 0 {
b.header.ReceiptHash = EmptyRootHash
} else {
b.header.ReceiptHash = DeriveSha(Receipts(receipts))
b.header.Bloom = CreateBloom(receipts)
}
if len(uncles) == 0 {
b.header.UncleHash = EmptyUncleHash
} else {
b.header.UncleHash = CalcUncleHash(uncles)
b.uncles = make([]*Header, len(uncles))
for i := range uncles {
b.uncles[i] = CopyHeader(uncles[i])
}
}
return b
}
// NewBlockWithHeader creates a block with the given header data. The
// header data is copied, changes to header and to the field values
// will not affect the block.
func NewBlockWithHeader(header *Header) *Block {
return &Block{header: CopyHeader(header)}
}
// CopyHeader creates a deep copy of a block header to prevent side effects from
// modifying a header variable.
func CopyHeader(h *Header) *Header {
cpy := *h
if cpy.Time = new(big.Int); h.Time != nil {
cpy.Time.Set(h.Time)
}
if cpy.Difficulty = new(big.Int); h.Difficulty != nil {
cpy.Difficulty.Set(h.Difficulty)
}
if cpy.Number = new(big.Int); h.Number != nil {
cpy.Number.Set(h.Number)
}
if cpy.GasLimit = new(big.Int); h.GasLimit != nil {
cpy.GasLimit.Set(h.GasLimit)
}
if cpy.GasUsed = new(big.Int); h.GasUsed != nil {
cpy.GasUsed.Set(h.GasUsed)
}
if len(h.Extra) > 0 {
cpy.Extra = make([]byte, len(h.Extra))
copy(cpy.Extra, h.Extra)
}
return &cpy
}
// DecodeRLP decodes the Ethereum RLP block format into b.
func (b *Block) DecodeRLP(s *rlp.Stream) error {
var eb extblock
_, size, _ := s.Kind()
if err := s.Decode(&eb); err != nil {
return err
}
b.header, b.uncles, b.transactions = eb.Header, eb.Uncles, eb.Txs
b.size.Store(common.StorageSize(rlp.ListSize(size)))
return nil
}
// EncodeRLP serializes b into the Ethereum RLP block format.
func (b *Block) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, extblock{
Header: b.header,
Txs: b.transactions,
Uncles: b.uncles,
})
}
// [deprecated by eth/63]
func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error {
var sb storageblock
if err := s.Decode(&sb); err != nil {
return err
}
b.header, b.uncles, b.transactions, b.td = sb.Header, sb.Uncles, sb.Txs, sb.TD
return nil
}
// TODO: copies
func (b *Block) Uncles() []*Header { return b.uncles }
func (b *Block) Transactions() Transactions { return b.transactions }
func (b *Block) Transaction(hash common.Hash) *Transaction {
for _, transaction := range b.transactions {
if transaction.Hash() == hash {
return transaction
}
}
return nil
}
func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number) }
func (b *Block) GasLimit() *big.Int { return new(big.Int).Set(b.header.GasLimit) }
func (b *Block) GasUsed() *big.Int { return new(big.Int).Set(b.header.GasUsed) }
func (b *Block) Difficulty() *big.Int { return new(big.Int).Set(b.header.Difficulty) }
func (b *Block) Time() *big.Int { return new(big.Int).Set(b.header.Time) }
func (b *Block) NumberU64() uint64 { return b.header.Number.Uint64() }
func (b *Block) MixDigest() common.Hash { return b.header.MixDigest }
func (b *Block) Nonce() uint64 { return binary.BigEndian.Uint64(b.header.Nonce[:]) }
func (b *Block) Bloom() Bloom { return b.header.Bloom }
func (b *Block) Coinbase() common.Address { return b.header.Coinbase }
func (b *Block) Root() common.Hash { return b.header.Root }
func (b *Block) ParentHash() common.Hash { return b.header.ParentHash }
func (b *Block) TxHash() common.Hash { return b.header.TxHash }
func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash }
func (b *Block) UncleHash() common.Hash { return b.header.UncleHash }
func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) }
func (b *Block) Header() *Header { return CopyHeader(b.header) }
// Body returns the non-header content of the block.
func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles} }
func (b *Block) HashNoNonce() common.Hash {
return b.header.HashNoNonce()
}
func (b *Block) Size() common.StorageSize {
if size := b.size.Load(); size != nil {
return size.(common.StorageSize)
}
c := writeCounter(0)
rlp.Encode(&c, b)
b.size.Store(common.StorageSize(c))
return common.StorageSize(c)
}
type writeCounter common.StorageSize
func (c *writeCounter) Write(b []byte) (int, error) {
*c += writeCounter(len(b))
return len(b), nil
}
func CalcUncleHash(uncles []*Header) common.Hash {
return rlpHash(uncles)
}
// WithMiningResult returns a new block with the data from b
// where nonce and mix digest are set to the provided values.
func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block {
cpy := *b.header
binary.BigEndian.PutUint64(cpy.Nonce[:], nonce)
cpy.MixDigest = mixDigest
return &Block{
header: &cpy,
transactions: b.transactions,
uncles: b.uncles,
}
}
// WithBody returns a new block with the given transaction and uncle contents.
func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block {
block := &Block{
header: CopyHeader(b.header),
transactions: make([]*Transaction, len(transactions)),
uncles: make([]*Header, len(uncles)),
}
copy(block.transactions, transactions)
for i := range uncles {
block.uncles[i] = CopyHeader(uncles[i])
}
return block
}
// Hash returns the keccak256 hash of b's header.
// The hash is computed on the first call and cached thereafter.
func (b *Block) Hash() common.Hash {
if hash := b.hash.Load(); hash != nil {
return hash.(common.Hash)
}
v := rlpHash(b.header)
b.hash.Store(v)
return v
}
func (b *Block) String() string {
str := fmt.Sprintf(`Block(#%v): Size: %v {
MinerHash: %x
%v
Transactions:
%v
Uncles:
%v
}
`, b.Number(), b.Size(), b.header.HashNoNonce(), b.header, b.transactions, b.uncles)
return str
}
func (h *Header) String() string {
return fmt.Sprintf(`Header(%x):
[
ParentHash: %x
UncleHash: %x
Coinbase: %x
Root: %x
TxSha %x
ReceiptSha: %x
Bloom: %x
Difficulty: %v
Number: %v
GasLimit: %v
GasUsed: %v
Time: %v
Extra: %s
MixDigest: %x
Nonce: %x
]`, h.Hash(), h.ParentHash, h.UncleHash, h.Coinbase, h.Root, h.TxHash, h.ReceiptHash, h.Bloom, h.Difficulty, h.Number, h.GasLimit, h.GasUsed, h.Time, h.Extra, h.MixDigest, h.Nonce)
}
type Blocks []*Block
type BlockBy func(b1, b2 *Block) bool
func (self BlockBy) Sort(blocks Blocks) {
bs := blockSorter{
blocks: blocks,
by: self,
}
sort.Sort(bs)
}
type blockSorter struct {
blocks Blocks
by func(b1, b2 *Block) bool
}
func (self blockSorter) Len() int { return len(self.blocks) }
func (self blockSorter) Swap(i, j int) {
self.blocks[i], self.blocks[j] = self.blocks[j], self.blocks[i]
}
func (self blockSorter) Less(i, j int) bool { return self.by(self.blocks[i], self.blocks[j]) }
func Number(b1, b2 *Block) bool { return b1.header.Number.Cmp(b2.header.Number) < 0 }<|fim▁end|> | UncleHash *common.Hash `json:"sha3Uncles"` |
<|file_name|>pycbc_phenomC_tmplt.py<|end_file_name|><|fim▁begin|># Copyright (C) 2012 Prayush Kumar
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with with program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import lal
import numpy
from numpy import sqrt, log, float128
from pycuda.elementwise import ElementwiseKernel
from pycbc.libutils import pkg_config_header_strings
from pycbc.types import FrequencySeries, zeros, Array, complex64
preamble = """
#include <lal/LALConstants.h>
"""
phenomC_text = """
/* ********* Main paper : Phys Rev D82, 064016 (2010) ********* */
const double f = (double) (i + kmin ) * delta_f;
const double fd = (double) m_sec * f;
const double v = (double) cbrt(piM*f);
const double v2 = v * v;
const double v3 = v * v * v;
const double v4 = v2 * v2;
const double v5 = v2 * v3;
const double v6 = v3 * v3;
const double v7 = v3 * v4;
const double w = (double) cbrt( m_sec * f );
const double w3 = (double) w * w * w;
/* ******************************************************* */
/* *********************** Phasing *********************** */
/* This is defined in Eq 5.1 - 5.9, 3.13 of the main paper */
/* ******************************************************* */
double phSPA = 1. + pfa2 * v2 + pfa3 * v3 + pfa4 * v4 +
(1. + log(v3)) * pfa5 * v5 + (pfa6 + pfa6log * log(v3))*v6 +
pfa7 * v7;
phSPA *= (pfaN / v5);
phSPA -= (LAL_PI/4.0);
double phPM = (a1/(w3 * w * w)) + (a2/w3) + (a3/w) + a4 + (a5 * w * w) +(a6 * w3);
phPM /= eta;
double phRD = b1 + b2*fd;
double wPlusf1 = 0.5*(1. + tanh( (4*(fd - Mf1)/d1) ));
double wMinusf1 = 0.5*(1. - tanh( (4*(fd - Mf1)/d1) ));
double wPlusf2 = 0.5*(1. + tanh( (4*(fd - Mf2)/d2) ));
double wMinusf2 = 0.5*(1. - tanh( (4*(fd - Mf2)/d2) ));
double phasing = (phSPA * ((double) wMinusf1)) + (phPM * ((double) wPlusf1 * wMinusf2)) +
(phRD * ((double) wPlusf2));
/* ******************************************************* */
/* ********************** Amplitude **************** */
/* *** This is defined in Eq 5.11 - 5.13, 3.10, 3.6 ****** */
/* ******************************************************* */
double xdot = 1. + xdota2 * v2 + xdota3 * v3 + xdota4 * v4 + xdota5 * v5 +
(xdota6 + xdota6log * log(v2)) * v6 + xdota7 * v7;
xdot *= (xdotaN * v5 * v5);
double omgdot = 0.0, ampfac = 0.0;
double ampSPA = 0.0, ampSPAre = 0.0, ampSPAim = 0.0;
/* If xdot becomes negative, take ampSPA = 0.0 */
/* This is valid because it becomes negative much after ISCO */
if( xdot > 0.0 )
{
omgdot = 1.5 * v * xdot;
ampfac = sqrt( LAL_PI / omgdot );
ampSPAre = ampfac * AN * v2 * (1. + A2 * v2 + A3 * v3 + A4 * v4 +
A5 * v5 + (A6 + A6log * log(v2)) * v6);
ampSPAim = ampfac * AN * v2 * (A5imag * v5 + A6imag * v6);
ampSPA = sqrt( ampSPAre * ampSPAre + ampSPAim * ampSPAim );
}
double ampPM = ampSPA + (g1 * pow(fd, 5./6.));
const double sig = Mfrd * del2 / Q;
double sig2 = sig * sig;
double L = sig2 / ((fd - Mfrd) * (fd - Mfrd) + sig2/4.);
double ampRD = del1 * L * pow( fd, -7./6.);
double wPlusf0 = 0.5*(1. + tanh( (4*(fd - Mf0)/d0) ));
double wMinusf0 = 0.5*(1. - tanh( (4*(fd - Mf0)/d0) ));
double amplitude = (ampPM * ((double) wMinusf0)) + (ampRD * ((double) wPlusf0));
amplitude /= distance;
/* ************** htilde **************** */
htilde[i]._M_re = amplitude * cos( phasing );
htilde[i]._M_im = -1.0 * amplitude * sin( phasing );
"""
phenomC_kernel = ElementwiseKernel("""pycuda::complex<double> *htilde, int kmin, double delta_f,
double eta, double Xi, double distance,
double m_sec, double piM, double Mfrd,
double pfaN, double pfa2, double pfa3, double pfa4,
double pfa5, double pfa6, double pfa6log, double pfa7,
double a1, double a2, double a3, double a4,
double a5, double a6, double b1, double b2,
double Mf1, double Mf2, double Mf0,
double d1, double d2, double d0,
double xdota2, double xdota3, double xdota4,
double xdota5, double xdota6, double xdota6log,
double xdota7, double xdotaN, double AN,
double A2, double A3, double A4, double A5,
double A5imag, double A6, double A6log, double A6imag,
double g1, double del1, double del2, double Q""",
phenomC_text, "phenomC_kernel",
preamble=preamble, options=pkg_config_header_strings(['lal']))
def FinalSpin( Xi, eta ):
"""Computes the spin of the final BH that gets formed after merger. This is done usingn Eq 5-6 of arXiv:0710.3345"""
s4 = -0.129
s5 = -0.384
t0 = -2.686
t2 = -3.454
t3 = 2.353
etaXi = eta * Xi
eta2 = eta*eta
finspin = (Xi + s4*Xi*etaXi + s5*etaXi*eta + t0*etaXi + 2.*(3.**0.5)*eta + t2*eta2 + t3*eta2*eta)
if finspin > 1.0:
raise ValueError("Value of final spin > 1.0. Aborting")
else:
return finspin
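# Illustrative check (assumed inputs, not part of the original fit): for an
# equal-mass, non-spinning binary (Xi = 0, eta = 0.25) the expression above
# reduces to 2.*sqrt(3.)*0.25 + t2*0.25**2 + t3*0.25**3
# ~= 0.866 - 0.216 + 0.037 ~= 0.687, the familiar ~0.69 dimensionless spin
# of an equal-mass non-spinning merger remnant.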
def fRD( a, M):
"""Calculate the ring-down frequency for the final Kerr BH. Using Eq. 5.5 of Main paper"""
f = (lal.C_SI**3.0 / (2.0*lal.PI*lal.G_SI*M*lal.MSUN_SI)) * (1.5251 - 1.1568*(1.0-a)**0.1292)
return f
def Qa( a ):
"""Calculate the quality factor of ring-down, using Eq 5.6 of Main paper"""
return (0.7 + 1.4187*(1.0-a)**-0.4990)
#Functions to calculate the Tanh window, defined in Eq 5.8 of the main paper
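# A minimal sketch of that window (illustrative only; the CUDA kernel above
# evaluates it inline as the wPlusf*/wMinusf* terms):
# w_plus = 0.5*(1 + tanh(4*(f - f0)/d)) and w_minus = 1 - w_plus, e.g.
#
#   def tanh_window_plus(f, f0, d):
#       return 0.5 * (1. + numpy.tanh(4. * (f - f0) / d))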
def imrphenomc_tmplt(**kwds):
""" Return an IMRPhenomC waveform using CUDA to generate the phase and amplitude
Main Paper: arXiv:1005.3306
"""
# Pull out the input arguments
f_min = float128(kwds['f_lower'])
f_max = float128(kwds['f_final'])
delta_f = float128(kwds['delta_f'])
distance = float128(kwds['distance'])
mass1 = float128(kwds['mass1'])
mass2 = float128(kwds['mass2'])
spin1z = float128(kwds['spin1z'])
spin2z = float128(kwds['spin2z'])
if 'out' in kwds:
out = kwds['out']
else:
out = None
# Calculate binary parameters
M = mass1 + mass2
eta = mass1 * mass2 / (M * M)
Xi = (mass1 * spin1z / M) + (mass2 * spin2z / M)
Xisum = 2.*Xi
Xiprod = Xi*Xi
Xi2 = Xi*Xi
m_sec = M * lal.MTSUN_SI;
piM = lal.PI * m_sec;
    ## The units of distance given as input are taken to be Mpc. Converting to SI
distance *= (1.0e6 * lal.PC_SI / (2. * sqrt(5. / (64.*lal.PI)) * M * lal.MRSUN_SI * M * lal.MTSUN_SI))
# Check if the value of f_max is correctly given, else replace with the fCut
# used in the PhenomB code in lalsimulation. The various coefficients come
# from Eq.(4.18) of http://arxiv.org/pdf/0710.2335 and
# Table I of http://arxiv.org/pdf/0712.0343
if not f_max:
f_max = (1.7086 * eta * eta - 0.26592 * eta + 0.28236) / piM
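    # For scale (illustrative numbers only): a 10+10 solar-mass binary has
    # eta = 0.25 and piM = pi * 20 * lal.MTSUN_SI ~= 3.09e-4 s, so this
    # fallback gives f_max ~= 0.3227 / 3.09e-4 ~= 1.0 kHz.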
# Transform the eta, chi to Lambda parameters, using Eq 5.14, Table II of Main
# paper.
z101 = -2.417e-03
z102 = -1.093e-03
z111 = -1.917e-02
z110 = 7.267e-02
z120 = -2.504e-01
z201 = 5.962e-01
z202 = -5.600e-02
z211 = 1.520e-01
z210 = -2.970e+00
z220 = 1.312e+01
z301 = -3.283e+01
z302 = 8.859e+00
z311 = 2.931e+01
z310 = 7.954e+01
z320 = -4.349e+02
z401 = 1.619e+02
z402 = -4.702e+01
z411 = -1.751e+02
z410 = -3.225e+02
z420 = 1.587e+03
z501 = -6.320e+02
z502 = 2.463e+02
z511 = 1.048e+03
z510 = 3.355e+02
z520 = -5.115e+03
z601 = -4.809e+01
z602 = -3.643e+02
z611 = -5.215e+02
z610 = 1.870e+03
z620 = 7.354e+02
z701 = 4.149e+00
z702 = -4.070e+00
z711 = -8.752e+01
z710 = -4.897e+01
z720 = 6.665e+02
z801 = -5.472e-02
z802 = 2.094e-02
z811 = 3.554e-01
z810 = 1.151e-01
z820 = 9.640e-01
z901 = -1.235e+00
z902 = 3.423e-01
z911 = 6.062e+00
z910 = 5.949e+00
z920 = -1.069e+01
eta2 = eta*eta
Xi2 = Xiprod
# Calculate alphas, gamma, deltas from Table II and Eq 5.14 of Main paper
a1 = z101 * Xi + z102 * Xi2 + z111 * eta * Xi + z110 * eta + z120 * eta2
a2 = z201 * Xi + z202 * Xi2 + z211 * eta * Xi + z210 * eta + z220 * eta2
a3 = z301 * Xi + z302 * Xi2 + z311 * eta * Xi + z310 * eta + z320 * eta2
a4 = z401 * Xi + z402 * Xi2 + z411 * eta * Xi + z410 * eta + z420 * eta2
a5 = z501 * Xi + z502 * Xi2 + z511 * eta * Xi + z510 * eta + z520 * eta2
a6 = z601 * Xi + z602 * Xi2 + z611 * eta * Xi + z610 * eta + z620 * eta2
g1 = z701 * Xi + z702 * Xi2 + z711 * eta * Xi + z710 * eta + z720 * eta2
del1 = z801 * Xi + z802 * Xi2 + z811 * eta * Xi + z810 * eta + z820 * eta2
del2 = z901 * Xi + z902 * Xi2 + z911 * eta * Xi + z910 * eta + z920 * eta2
# Get the spin of the final BH
afin = FinalSpin( Xi, eta )
Q = Qa( abs(afin) )
# Get the fRD
frd = fRD( abs(afin), M)
Mfrd = frd * m_sec
# Define the frequencies where SPA->PM->RD
f1 = 0.1 * frd
Mf1 = m_sec * f1
f2 = frd
Mf2 = m_sec * f2
d1 = 0.005
d2 = 0.005
f0 = 0.98 * frd
Mf0 = m_sec * f0
d0 = 0.015
# Now use this frequency for calculation of betas
# calculate beta1 and beta2, that appear in Eq 5.7 in the main paper.
b2 = ((-5./3.)* a1 * pow(Mfrd,(-8./3.)) - a2/(Mfrd*Mfrd) - \
(a3/3.)*pow(Mfrd,(-4./3.)) + (2./3.)* a5 * pow(Mfrd,(-1./3.)) + a6)/eta
psiPMrd = (a1 * pow(Mfrd,(-5./3.)) + a2/Mfrd + a3 * pow(Mfrd,(-1./3.)) + \
a4 + a5 * pow(Mfrd,(2./3.)) + a6 * Mfrd)/eta
b1 = psiPMrd - (b2 * Mfrd)
### Calculate the PN coefficients, Eq A3 - A5 of main paper ###
pfaN = 3.0/(128.0 * eta)
pfa2 = (3715./756.) + (55.*eta/9.0)<|fim▁hole|> 3085.*eta2/72.
pfa5 = lal.PI*(386.45/7.56 - 65.*eta/9.) - \
Xi*(735.505/2.268 + 130.*eta/9.) + Xisum*(1285.0*eta/8.1 + 170.*eta2/9.) - \
10.*Xi2*Xi/3. + 10.*eta*Xi*Xiprod
pfa6 = 11583.231236531/4.694215680 - 640.0*lal.PI*lal.PI/3. - \
6848.0*lal.GAMMA/21. - 684.8*log(64.)/6.3 + \
eta*(2255.*lal.PI*lal.PI/12. - 15737.765635/3.048192) + \
76.055*eta2/1.728 - (127.825*eta2*eta/1.296) + \
2920.*lal.PI*Xi/3. - (175. - 1490.*eta)*Xi2/3. - \
(1120.*lal.PI/3. - 1085.*Xi/3.)*eta*Xisum + \
(269.45*eta/3.36 - 2365.*eta2/6.)*Xiprod
pfa6log = -6848./63.
pfa7 = lal.PI*(770.96675/2.54016 + 378.515*eta/1.512 - 740.45*eta2/7.56) - \
Xi*(20373.952415/3.048192 + 1509.35*eta/2.24 - 5786.95*eta2/4.32) + \
Xisum*(4862.041225*eta/1.524096 + 1189.775*eta2/1.008 - 717.05*eta2*eta/2.16 - 830.*eta*Xi2/3. + 35.*eta2*Xiprod/3.) - \
560.*lal.PI*Xi2 + 20.*lal.PI*eta*Xiprod + \
Xi2*Xi*(945.55/1.68 - 85.*eta) + Xi*Xiprod*(396.65*eta/1.68 + 255.*eta2)
xdotaN = 64.*eta/5.
xdota2 = -7.43/3.36 - 11.*eta/4.
xdota3 = 4.*lal.PI - 11.3*Xi/1.2 + 19.*eta*Xisum/6.
xdota4 = 3.4103/1.8144 + 5*Xi2 + eta*(13.661/2.016 - Xiprod/8.) + 5.9*eta2/1.8
xdota5 = -lal.PI*(41.59/6.72 + 189.*eta/8.) - Xi*(31.571/1.008 - 116.5*eta/2.4) + \
Xisum*(21.863*eta/1.008 - 79.*eta2/6.) - 3*Xi*Xi2/4. + \
9.*eta*Xi*Xiprod/4.
xdota6 = 164.47322263/1.39708800 - 17.12*lal.GAMMA/1.05 + \
16.*lal.PI*lal.PI/3 - 8.56*log(16.)/1.05 + \
eta*(45.1*lal.PI*lal.PI/4.8 - 561.98689/2.17728) + \
5.41*eta2/8.96 - 5.605*eta*eta2/2.592 - 80.*lal.PI*Xi/3. + \
eta*Xisum*(20.*lal.PI/3. - 113.5*Xi/3.6) + \
Xi2*(64.153/1.008 - 45.7*eta/3.6) - \
Xiprod*(7.87*eta/1.44 - 30.37*eta2/1.44)
xdota6log = -856./105.
xdota7 = -lal.PI*(4.415/4.032 - 358.675*eta/6.048 - 91.495*eta2/1.512) - \
Xi*(252.9407/2.7216 - 845.827*eta/6.048 + 415.51*eta2/8.64) + \
Xisum*(158.0239*eta/5.4432 - 451.597*eta2/6.048 + 20.45*eta2*eta/4.32 + 107.*eta*Xi2/6. - 5.*eta2*Xiprod/24.) + \
12.*lal.PI*Xi2 - Xi2*Xi*(150.5/2.4 + eta/8.) + \
Xi*Xiprod*(10.1*eta/2.4 + 3.*eta2/8.)
AN = 8.*eta*sqrt(lal.PI/5.)
A2 = (-107. + 55.*eta)/42.
A3 = 2.*lal.PI - 4.*Xi/3. + 2.*eta*Xisum/3.
A4 = -2.173/1.512 - eta*(10.69/2.16 - 2.*Xiprod) + 2.047*eta2/1.512
A5 = -10.7*lal.PI/2.1 + eta*(3.4*lal.PI/2.1)
A5imag = -24.*eta
A6 = 270.27409/6.46800 - 8.56*lal.GAMMA/1.05 + \
2.*lal.PI*lal.PI/3. + \
eta*(4.1*lal.PI*lal.PI/9.6 - 27.8185/3.3264) - \
20.261*eta2/2.772 + 11.4635*eta*eta2/9.9792 - \
4.28*log(16.)/1.05
A6log = -428./105.
A6imag = 4.28*lal.PI/1.05
### Define other parameters needed by waveform generation ###
kmin = int(f_min / delta_f)
kmax = int(f_max / delta_f)
n = kmax + 1;
if not out:
htilde = FrequencySeries(zeros(n,dtype=numpy.complex128), delta_f=delta_f, copy=False)
else:
if type(out) is not Array:
raise TypeError("Output must be an instance of Array")
if len(out) < kmax:
raise TypeError("Output array is too small")
if out.dtype != complex64:
raise TypeError("Output array is the wrong dtype")
htilde = FrequencySeries(out, delta_f=delta_f, copy=False)
phenomC_kernel(htilde.data[kmin:kmax], kmin, delta_f, eta, Xi, distance,
m_sec, piM, Mfrd,
pfaN, pfa2, pfa3, pfa4, pfa5, pfa6, pfa6log, pfa7,
a1, a2, a3, a4, a5, a6, b1, b2,
Mf1, Mf2, Mf0, d1, d2, d0,
xdota2, xdota3, xdota4, xdota5, xdota6, xdota6log,
xdota7, xdotaN, AN, A2, A3, A4, A5,
A5imag, A6, A6log, A6imag,
g1, del1, del2, Q )
hp = htilde
hc = htilde * 1j
return hp, hc<|fim▁end|> | pfa3 = -16.0*lal.PI + (113./3.)*Xi - 38.*eta*Xisum/3.
pfa4 = (152.93365/5.08032) - 50.*Xi2 + eta*(271.45/5.04 + 1.25*Xiprod) + \ |
<|file_name|>query_issuer.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Copyright 2014 Philipp Winter <[email protected]>
#
# This file is part of atlas tools.
#
# atlas tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.<|fim▁hole|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with atlas tools. If not, see <http://www.gnu.org/licenses/>.
import urllib2
import logger
log = logger.get_logger()
def send_query(json_blurb, api_key):
"""
Send HTTP POST request containing the JSON-formatted Atlas query.
If successful, Atlas' API should return the JSON-formatted measurement ID.
"""
url = "https://atlas.ripe.net/api/v1/measurement/?key=" + api_key
log.debug("Sending %d bytes of JSON blurb to %s." % (len(json_blurb), url))
request = urllib2.Request(url, json_blurb)
request.add_header("Content-Type", "application/json")
request.add_header("Accept", "application/json")
try:
response = urllib2.urlopen(request)
except urllib2.URLError as err:
log.error("urllib2.urlopen failed: %s" % err)
return None
result = response.read()
log.debug("Received: %s" % result)
return result<|fim▁end|> | #
# atlas tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of |
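# Illustrative usage of send_query (hypothetical measurement spec and API key,
# not taken from this module):
#
#   import json
#   spec = {"definitions": [{"target": "example.com", "type": "ping", "af": 4,
#                            "description": "ping example.com"}],
#           "probes": [{"requested": 5, "type": "area", "value": "WW"}]}
#   reply = send_query(json.dumps(spec), "MY-ATLAS-API-KEY")
#   if reply is not None:
#       print(json.loads(reply))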
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""Support for EcoNet products."""
from datetime import timedelta
import logging
from aiohttp.client_exceptions import ClientError
from pyeconet import EcoNetApiInterface
from pyeconet.equipment import EquipmentType
from pyeconet.errors import (
GenericHTTPError,
InvalidCredentialsError,
InvalidResponseFormat,
PyeconetError,<|fim▁hole|>from homeassistant.const import CONF_EMAIL, CONF_PASSWORD, TEMP_FAHRENHEIT, Platform
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType
from .const import API_CLIENT, DOMAIN, EQUIPMENT
_LOGGER = logging.getLogger(__name__)
PLATFORMS = [
Platform.CLIMATE,
Platform.BINARY_SENSOR,
Platform.SENSOR,
Platform.WATER_HEATER,
]
PUSH_UPDATE = "econet.push_update"
INTERVAL = timedelta(minutes=60)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the EcoNet component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][API_CLIENT] = {}
hass.data[DOMAIN][EQUIPMENT] = {}
return True
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Set up EcoNet as config entry."""
email = config_entry.data[CONF_EMAIL]
password = config_entry.data[CONF_PASSWORD]
try:
api = await EcoNetApiInterface.login(email, password=password)
except InvalidCredentialsError:
_LOGGER.error("Invalid credentials provided")
return False
except PyeconetError as err:
_LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady from err
try:
equipment = await api.get_equipment_by_type(
[EquipmentType.WATER_HEATER, EquipmentType.THERMOSTAT]
)
except (ClientError, GenericHTTPError, InvalidResponseFormat) as err:
raise ConfigEntryNotReady from err
hass.data[DOMAIN][API_CLIENT][config_entry.entry_id] = api
hass.data[DOMAIN][EQUIPMENT][config_entry.entry_id] = equipment
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
api.subscribe()
def update_published():
"""Handle a push update."""
dispatcher_send(hass, PUSH_UPDATE)
for _eqip in equipment[EquipmentType.WATER_HEATER]:
_eqip.set_update_callback(update_published)
for _eqip in equipment[EquipmentType.THERMOSTAT]:
_eqip.set_update_callback(update_published)
async def resubscribe(now):
"""Resubscribe to the MQTT updates."""
await hass.async_add_executor_job(api.unsubscribe)
api.subscribe()
async def fetch_update(now):
"""Fetch the latest changes from the API."""
await api.refresh_equipment()
config_entry.async_on_unload(async_track_time_interval(hass, resubscribe, INTERVAL))
config_entry.async_on_unload(
async_track_time_interval(hass, fetch_update, INTERVAL + timedelta(minutes=1))
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a EcoNet config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN][API_CLIENT].pop(entry.entry_id)
hass.data[DOMAIN][EQUIPMENT].pop(entry.entry_id)
return unload_ok
class EcoNetEntity(Entity):
"""Define a base EcoNet entity."""
def __init__(self, econet):
"""Initialize."""
self._econet = econet
async def async_added_to_hass(self):
"""Subscribe to device events."""
await super().async_added_to_hass()
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
PUSH_UPDATE, self.on_update_received
)
)
@callback
def on_update_received(self):
"""Update was pushed from the ecoent API."""
self.async_write_ha_state()
@property
def available(self):
"""Return if the the device is online or not."""
return self._econet.connected
@property
def device_info(self) -> DeviceInfo:
"""Return device registry information for this entity."""
return DeviceInfo(
identifiers={(DOMAIN, self._econet.device_id)},
manufacturer="Rheem",
name=self._econet.device_name,
)
@property
def name(self):
"""Return the name of the entity."""
return self._econet.device_name
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return f"{self._econet.device_id}_{self._econet.device_name}"
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return False<|fim▁end|> | )
from homeassistant.config_entries import ConfigEntry |
<|file_name|>tst_updateshaderdatatransformjob.cpp<|end_file_name|><|fim▁begin|>/****************************************************************************
**
** Copyright (C) 2016 Paul Lemire <[email protected]>
** Contact: https://www.qt.io/licensing/
**
** This file is part of the Qt3D module of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:GPL-EXCEPT$
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and The Qt Company. For licensing terms
** and conditions see https://www.qt.io/terms-conditions. For further
** information use the contact form at https://www.qt.io/contact-us.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 3 as published by the Free Software
** Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
** included in the packaging of this file. Please review the following
** information to ensure the GNU General Public License requirements will
** be met: https://www.gnu.org/licenses/gpl-3.0.html.
**
** $QT_END_LICENSE$
**
****************************************************************************/
#include <QtTest/QTest>
#include <Qt3DRender/private/updateshaderdatatransformjob_p.h>
#include <Qt3DRender/private/updateworldtransformjob_p.h>
#include <Qt3DRender/private/nodemanagers_p.h>
#include <Qt3DRender/private/managers_p.h>
#include <Qt3DRender/qrenderaspect.h>
#include <Qt3DCore/qentity.h>
#include <Qt3DRender/qshaderdata.h>
#include <Qt3DRender/qcamera.h>
#include <Qt3DRender/private/shaderdata_p.h>
#include <Qt3DRender/private/qrenderaspect_p.h>
#include <Qt3DCore/private/qnodecreatedchangegenerator_p.h>
#include "qmlscenereader.h"
QT_BEGIN_NAMESPACE
namespace Qt3DRender {
class TestAspect : public Qt3DRender::QRenderAspect
{
public:
TestAspect(Qt3DCore::QNode *root)
: Qt3DRender::QRenderAspect(Qt3DRender::QRenderAspect::Synchronous)
, m_sceneRoot(nullptr)
{
Qt3DRender::QRenderAspect::onRegistered();
const Qt3DCore::QNodeCreatedChangeGenerator generator(root);
const QVector<Qt3DCore::QNodeCreatedChangeBasePtr> creationChanges = generator.creationChanges();
d_func()->setRootAndCreateNodes(qobject_cast<Qt3DCore::QEntity *>(root), creationChanges);
Qt3DRender::Render::Entity *rootEntity = nodeManagers()->lookupResource<Qt3DRender::Render::Entity, Render::EntityManager>(rootEntityId());
Q_ASSERT(rootEntity);
m_sceneRoot = rootEntity;
}
~TestAspect()
{
QRenderAspect::onUnregistered();
}
void onRegistered() { Qt3DRender::QRenderAspect::onRegistered(); }
void onUnregistered() { Qt3DRender::QRenderAspect::onUnregistered(); }
Qt3DRender::Render::NodeManagers *nodeManagers() const { return d_func()->m_renderer->nodeManagers(); }
Qt3DRender::Render::FrameGraphNode *frameGraphRoot() const { return d_func()->m_renderer->frameGraphRoot(); }
Qt3DRender::Render::RenderSettings *renderSettings() const { return d_func()->m_renderer->settings(); }
Qt3DRender::Render::Entity *sceneRoot() const { return m_sceneRoot; }
private:
Qt3DRender::Render::Entity *m_sceneRoot;
};
} // Qt3DRender
QT_END_NAMESPACE
namespace {
void runRequiredJobs(Qt3DRender::TestAspect *test)
{
Qt3DRender::Render::UpdateWorldTransformJob updateWorldTransform;
updateWorldTransform.setRoot(test->sceneRoot());
updateWorldTransform.run();
}
struct NodeCollection
{
explicit NodeCollection(Qt3DRender::TestAspect *aspect, QObject *frontendRoot)
: shaderData(frontendRoot->findChildren<Qt3DRender::QShaderData *>())
{
// THEN
QCOMPARE(aspect->nodeManagers()->shaderDataManager()->activeHandles().size(), shaderData.size());
for (const Qt3DRender::QShaderData *s : qAsConst(shaderData)) {
Qt3DRender::Render::ShaderData *backend = aspect->nodeManagers()->shaderDataManager()->lookupResource(s->id());
QVERIFY(backend != nullptr);
backendShaderData.push_back(backend);
}
}
QList<Qt3DRender::QShaderData *> shaderData;
QVector<Qt3DRender::Render::ShaderData *> backendShaderData;
};
} // anonymous
class tst_UpdateShaderDataTransformJob : public QObject
{
Q_OBJECT
private Q_SLOTS:
void checkInitialState()
{
// GIVEN
Qt3DRender::Render::UpdateShaderDataTransformJob backendUpdateShaderDataTransformJob;
// THEN
QVERIFY(backendUpdateShaderDataTransformJob.managers() == nullptr);
}
void checkInitializeState()
{
// GIVEN
Qt3DRender::Render::UpdateShaderDataTransformJob backendUpdateShaderDataTransformJob;
Qt3DRender::Render::NodeManagers managers;
// WHEN
backendUpdateShaderDataTransformJob.setManagers(&managers);
// THEN
QVERIFY(backendUpdateShaderDataTransformJob.managers() == &managers);
}
void checkRunModelToEye()
{
// GIVEN
QmlSceneReader sceneReader(QUrl("qrc:/test_scene_model_to_eye.qml"));
QScopedPointer<Qt3DCore::QNode> root(qobject_cast<Qt3DCore::QNode *>(sceneReader.root()));
QVERIFY(root);
QScopedPointer<Qt3DRender::TestAspect> test(new Qt3DRender::TestAspect(root.data()));
// Properly compute the world transforms
runRequiredJobs(test.data());
// WHEN
Qt3DRender::QCamera *camera = root->findChild<Qt3DRender::QCamera *>();
const NodeCollection collection(test.data(), root.data());
// THEN
QCOMPARE(collection.shaderData.size(), 1);
QVERIFY(camera != nullptr);
// WHEN
Qt3DRender::Render::ShaderData *backendShaderData = collection.backendShaderData.first();
// THEN
QCOMPARE(backendShaderData->properties().size(), 3);
QVERIFY(backendShaderData->properties().contains(QLatin1String("eyePosition")));
QVERIFY(backendShaderData->properties().contains(QLatin1String("eyePositionTransformed")));
QCOMPARE(backendShaderData->properties()[QLatin1String("eyePosition")].value<QVector3D>(), QVector3D(1.0f, 1.0f, 1.0f));
QCOMPARE(backendShaderData->properties()[QLatin1String("eyePositionTransformed")].toInt(), int(Qt3DRender::Render::ShaderData::ModelToEye));
// WHEN
Qt3DRender::Render::UpdateShaderDataTransformJob backendUpdateShaderDataTransformJob;
backendUpdateShaderDataTransformJob.setManagers(test->nodeManagers());
backendUpdateShaderDataTransformJob.run();
// THEN
// See scene file to find translation
QCOMPARE(backendShaderData->getTransformedProperty(QLatin1String("eyePosition"), camera->viewMatrix()).value<QVector3D>(), camera->viewMatrix() * (QVector3D(1.0f, 1.0f, 1.0f) + QVector3D(0.0f, 5.0f, 0.0f)));
}
void checkRunModelToWorld()
{
// GIVEN
QmlSceneReader sceneReader(QUrl("qrc:/test_scene_model_to_world.qml"));
QScopedPointer<Qt3DCore::QNode> root(qobject_cast<Qt3DCore::QNode *>(sceneReader.root()));<|fim▁hole|> // Properly compute the world transforms
runRequiredJobs(test.data());
// WHEN
Qt3DRender::QCamera *camera = root->findChild<Qt3DRender::QCamera *>();
const NodeCollection collection(test.data(), root.data());
// THEN
QCOMPARE(collection.shaderData.size(), 1);
QVERIFY(camera != nullptr);
// WHEN
Qt3DRender::Render::ShaderData *backendShaderData = collection.backendShaderData.first();
// THEN
QCOMPARE(backendShaderData->properties().size(), 3);
QVERIFY(backendShaderData->properties().contains(QLatin1String("position")));
QVERIFY(backendShaderData->properties().contains(QLatin1String("positionTransformed")));
QCOMPARE(backendShaderData->properties()[QLatin1String("position")].value<QVector3D>(), QVector3D(1.0f, 1.0f, 1.0f));
QCOMPARE(backendShaderData->properties()[QLatin1String("positionTransformed")].toInt(), int(Qt3DRender::Render::ShaderData::ModelToWorld));
// WHEN
Qt3DRender::Render::UpdateShaderDataTransformJob backendUpdateShaderDataTransformJob;
backendUpdateShaderDataTransformJob.setManagers(test->nodeManagers());
backendUpdateShaderDataTransformJob.run();
// THEN
// See scene file to find translation
QCOMPARE(backendShaderData->getTransformedProperty(QLatin1String("position"), camera->viewMatrix()).value<QVector3D>(), QVector3D(1.0f, 1.0f, 1.0f) + QVector3D(5.0f, 5.0f, 5.0f));
}
};
QTEST_MAIN(tst_UpdateShaderDataTransformJob)
#include "tst_updateshaderdatatransformjob.moc"<|fim▁end|> | QVERIFY(root);
QScopedPointer<Qt3DRender::TestAspect> test(new Qt3DRender::TestAspect(root.data()));
|
<|file_name|>aws_logs.py<|end_file_name|><|fim▁begin|># Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing classes related to AWS CloudWatch Logs."""
import json
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import util
class LogGroup(resource.BaseResource):
"""Class representing a CloudWatch log group."""
def __init__(self, region, name, retention_in_days=7):
super(LogGroup, self).__init__()
self.region = region
self.name = name
self.retention_in_days = retention_in_days
def _Create(self):
"""Create the log group."""<|fim▁hole|> '--region', self.region,
'logs', 'create-log-group',
'--log-group-name', self.name
]
vm_util.IssueCommand(create_cmd)
def _Delete(self):
"""Delete the log group."""
delete_cmd = util.AWS_PREFIX + [
'--region', self.region,
'logs', 'delete-log-group',
'--log-group-name', self.name
]
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
def Exists(self):
"""Returns True if the log group exists."""
describe_cmd = util.AWS_PREFIX + [
'--region', self.region,
'logs', 'describe-log-groups',
'--log-group-name-prefix', self.name,
'--no-paginate'
]
stdout, _, _ = vm_util.IssueCommand(describe_cmd)
log_groups = json.loads(stdout)['logGroups']
group = next((group for group in log_groups
if group['logGroupName'] == self.name), None)
return bool(group)
def _PostCreate(self):
"""Set the retention policy."""
put_cmd = util.AWS_PREFIX + [
'--region', self.region,
'logs', 'put-retention-policy',
'--log-group-name', self.name,
'--retention-in-days', str(self.retention_in_days)
]
vm_util.IssueCommand(put_cmd)
def GetLogs(region, stream_name, group_name, token=None):
"""Fetches the JSON formatted log stream starting at the token."""
get_cmd = util.AWS_PREFIX + [
'--region', region,
'logs', 'get-log-events',
'--start-from-head',
'--log-group-name', group_name,
'--log-stream-name', stream_name,
]
if token:
get_cmd.extend(['--next-token', token])
stdout, _, _ = vm_util.IssueCommand(get_cmd)
return json.loads(stdout)
def GetLogStreamAsString(region, stream_name, log_group):
"""Returns the messages of the log stream as a string."""
log_lines = []
token = None
events = []
while token is None or events:
response = GetLogs(region, stream_name, log_group, token)
events = response['events']
token = response['nextForwardToken']
for event in events:
log_lines.append(event['message'])
return '\n'.join(log_lines)<|fim▁end|> | create_cmd = util.AWS_PREFIX + [ |
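# Illustrative usage (hypothetical names; assumes the usual Create()/Delete()
# wrappers that resource.BaseResource provides around _Create/_Delete):
#
#   group = LogGroup('us-east-1', 'pkb-logs', retention_in_days=1)
#   group.Create()
#   ...  # run something that writes a log stream named 'pkb-stream'
#   text = GetLogStreamAsString('us-east-1', 'pkb-stream', 'pkb-logs')
#   group.Delete()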
<|file_name|>histogram_builder.py<|end_file_name|><|fim▁begin|>"""This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Histogram class which lets you build your histograms just passing
the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from ...models import Range1d
from ...properties import Bool, Int
from .._builder import create_and_build
from .bar_builder import BarBuilder
from ..glyphs import HistogramGlyph
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Histogram(data, values=None, label=None, color=None, agg="count",
bins=None, yscale="linear", xgrid=False, ygrid=True,
continuous_range=None, **kw):
if continuous_range and not isinstance(continuous_range, Range1d):
raise ValueError(
"continuous_range must be an instance of bokeh.models.ranges.Range1d"
)
# The continuous_range is the y_range (until we implement HBar charts)
y_range = continuous_range
kw['label'] = label
kw['values'] = values
kw['color'] = color
kw['agg'] = agg
kw['yscale'] = yscale
kw['xgrid'] = xgrid
kw['ygrid'] = ygrid
kw['y_range'] = y_range
kw['bins'] = bins
return create_and_build(HistogramBuilder, data, **kw)
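# Illustrative usage (hypothetical data; `df` is any table-like input such as a
# pandas DataFrame with 'mpg' and 'cyl' columns):
#
#   from bokeh.charts import Histogram
#   from bokeh.io import show
#   hist = Histogram(df, values='mpg', color='cyl', bins=20)
#   show(hist)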
class HistogramBuilder(BarBuilder):<|fim▁hole|>
"""
bins = Int(default=None, help="""
Number of bins to use for the histogram. (default: None
(use Freedman-Diaconis rule)
""")
density = Bool(True, help="""
Whether to normalize the histogram. (default: True)
If True, the result is the value of the probability *density* function
at the bin, normalized such that the *integral* over the range is 1. If
False, the result will contain the number of samples in each bin.
For more info check ``numpy.histogram`` function documentation.
""")
glyph = HistogramGlyph
def _setup(self):
super(HistogramBuilder, self)._setup()
if self.attributes['color'].columns is not None:
self.fill_alpha = 0.6
def get_extra_args(self):
return dict(bin_count=self.bins)
def _set_ranges(self):
"""Push the Bar data into the ColumnDataSource and calculate
the proper ranges.
"""
x_max = max([comp_glyph.x_max for comp_glyph in self.comp_glyphs])
x_min = min([comp_glyph.x_min for comp_glyph in self.comp_glyphs])
y_max = max([comp_glyph.y_max for comp_glyph in self.comp_glyphs])
y_min = min([comp_glyph.y_min for comp_glyph in self.comp_glyphs])
x_buffer = ((x_max + x_min)/2.0)*0.1
self.x_range = Range1d(start=x_min - x_buffer, end=x_max + x_buffer)
self.y_range = Range1d(start=y_min, end=y_max * 1.1)<|fim▁end|> | """Generates one to many histograms with unique attributes.
The HistogramBuilder is responsible for producing a chart
containing one to many histograms from table-like inputs. |
<|file_name|>pltag.py<|end_file_name|><|fim▁begin|>from openflow.optin_manager.sfa.rspecs.elements.element import Element
class PLTag(Element):
fields = [
'tagname',<|fim▁hole|><|fim▁end|> | 'value',
] |
<|file_name|>test_helper_misc.py<|end_file_name|><|fim▁begin|># coding: utf-8
import os.path
from datetime import datetime
from django.conf import settings
from django.test.client import RequestFactory
from django.test.utils import override_settings
import pytest
from django_jinja.backend import Jinja2
from jinja2 import Markup
from mock import patch
from pyquery import PyQuery as pq
from bedrock.base.templatetags.helpers import static
from bedrock.mozorg.templatetags import misc
from bedrock.mozorg.tests import TestCase
from lib.l10n_utils.fluent import fluent_l10n
TEST_FILES_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'test_files')
TEST_L10N_MEDIA_PATH = os.path.join(TEST_FILES_ROOT, 'media', '%s', 'l10n')
TEST_DONATE_LINK = ('https://donate.mozilla.org/{locale}/'
'?presets={presets}&amount={default}'
'&utm_source=mozilla.org&utm_medium=referral&utm_content={source}'
'¤cy={currency}')
TEST_DONATE_PARAMS = {
'en-US': {
'currency': 'usd',
'presets': '100,50,25,15',
'default': '50'
},
'es-MX': {
'currency': 'eur',
'presets': '100,50,25,15',
'default': '15'
},
}
TEST_FIREFOX_TWITTER_ACCOUNTS = {
'en-US': 'https://twitter.com/firefox',
'es-ES': 'https://twitter.com/firefox_es',
'pt-BR': 'https://twitter.com/firefoxbrasil',
}
TEST_FXA_ENDPOINT = 'https://accounts.firefox.com/'
TEST_FXA_MOZILLAONLINE_ENDPOINT = 'https://accounts.firefox.com.cn/'
jinja_env = Jinja2.get_default()
# Where should this function go?
def render(s, context=None):
t = jinja_env.from_string(s)
return t.render(context or {})
def test_convert_to_high_res():
assert misc.convert_to_high_res('/media/img/the.dude.png') == '/media/img/the.dude-high-res.png'
assert misc.convert_to_high_res('/media/thats-a-bummer-man.jpg') == '/media/thats-a-bummer-man-high-res.jpg'
@patch('bedrock.mozorg.templatetags.misc._l10n_media_exists')
@patch('django.conf.settings.LANGUAGE_CODE', 'en-US')
class TestImgL10n(TestCase):
rf = RequestFactory()
def _render(self, locale, url):
req = self.rf.get('/')
req.locale = locale
return render("{{{{ l10n_img('{0}') }}}}".format(url),
{'request': req})
def test_works_for_default_lang(self, media_exists_mock):
"""Should output correct path for default lang always."""
media_exists_mock.return_value = True
assert self._render('en-US', 'dino/head.png') == static('img/l10n/en-US/dino/head.png')
assert self._render('en-US', 'img/dino/head.png') == static('img/l10n/en-US/dino/head.png')
assert self._render('en-US', 'dino/does-not-exist.png') == static('img/l10n/en-US/dino/does-not-exist.png')
def test_works_for_other_lang(self, media_exists_mock):
"""Should use the request lang if file exists."""
media_exists_mock.return_value = True
assert self._render('de', 'dino/head.png') == static('img/l10n/de/dino/head.png')
assert self._render('de', 'img/dino/head.png') == static('img/l10n/de/dino/head.png')
def test_defaults_when_lang_file_missing(self, media_exists_mock):
"""Should use default lang when file doesn't exist for lang."""
media_exists_mock.return_value = False
assert self._render('is', 'dino/head.png') == static('img/l10n/en-US/dino/head.png')
def test_latam_spanishes_fallback_to_european_spanish(self, media_exists_mock):
"""Should use es-ES image when file doesn't exist for lang."""
media_exists_mock.side_effect = [False, True]
assert self._render('es-AR', 'dino/head.png') == static('img/l10n/es-ES/dino/head.png')
media_exists_mock.reset_mock()
media_exists_mock.side_effect = [False, True]
assert self._render('es-CL', 'dino/head.png') == static('img/l10n/es-ES/dino/head.png')
media_exists_mock.reset_mock()
media_exists_mock.side_effect = [False, True]
assert self._render('es-MX', 'dino/head.png') == static('img/l10n/es-ES/dino/head.png')
media_exists_mock.reset_mock()
media_exists_mock.side_effect = [False, True]
assert self._render('es', 'dino/head.png') == static('img/l10n/es-ES/dino/head.png')
def test_file_not_checked_for_default_lang(self, media_exists_mock):
"""
Should not check filesystem for default lang, but should for others.
"""
assert self._render('en-US', 'dino/does-not-exist.png') == static('img/l10n/en-US/dino/does-not-exist.png')
assert not media_exists_mock.called
self._render('is', 'dino/does-not-exist.png')
media_exists_mock.assert_called_once_with('img', 'is', 'dino/does-not-exist.png')
@override_settings(DEBUG=False)
@patch('bedrock.mozorg.templatetags.misc._l10n_media_exists')
class TestL10nCSS(TestCase):
rf = RequestFactory()
static_url_dev = '/static/'
cdn_url = '//mozorg.cdn.mozilla.net'
static_url_prod = cdn_url + static_url_dev
markup = ('<link rel="stylesheet" media="screen,projection,tv" href='
'"%scss/l10n/%s/intl.css">')
def _render(self, locale):
req = self.rf.get('/')
req.locale = locale
return render('{{ l10n_css() }}', {'request': req})
@override_settings(DEV=True)
@patch('django.contrib.staticfiles.storage.staticfiles_storage.base_url', static_url_dev)
def test_dev_when_css_file_exists(self, media_exists_mock):
"""Should output a path to the CSS file if exists."""
media_exists_mock.return_value = True
assert self._render('de') == self.markup % (self.static_url_dev, 'de')
assert self._render('es-ES') == self.markup % (self.static_url_dev, 'es-ES')
@override_settings(DEV=True)
def test_dev_when_css_file_missing(self, media_exists_mock):
"""Should output nothing if the CSS file is missing."""
media_exists_mock.return_value = False
assert self._render('en-US') == ''
assert self._render('fr') == ''
@override_settings(DEV=False)
@patch('django.contrib.staticfiles.storage.staticfiles_storage.base_url', static_url_prod)
def test_prod_when_css_file_exists(self, media_exists_mock):
"""Should output a path to the CSS file if exists."""
media_exists_mock.return_value = True
assert self._render('de') == self.markup % (self.static_url_prod, 'de')
assert self._render('es-ES') == self.markup % (self.static_url_prod, 'es-ES')
@override_settings(DEV=False)
def test_prod_when_css_file_missing(self, media_exists_mock):
"""Should output nothing if the CSS file is missing."""
media_exists_mock.return_value = False
assert self._render('en-US') == ''
assert self._render('fr') == ''
class TestVideoTag(TestCase):
rf = RequestFactory()
# Video stubs
moz_video = 'http://videos.mozilla.org/serv/flux/example.%s'
nomoz_video = 'http://example.org/example.%s'
def get_l10n(self, locale):
return fluent_l10n([locale, 'en'], settings.FLUENT_DEFAULT_FILES)
def _render(self, template):
req = self.rf.get('/')
req.locale = 'en-US'
return render(template, {'request': req, 'fluent_l10n': self.get_l10n(req.locale)})
def test_empty(self):
# No video, no output.
assert render('{{ video() }}') == ''
def test_video(self):
# A few common variations
videos = [self.nomoz_video % ext for ext in ('ogv', 'mp4', 'webm')]
doc = pq(self._render("{{ video%s }}" % str(tuple(videos))))
# Tags generated?
assert doc('video').length == 1
assert doc('video source').length == 3
# Extensions in the right order?
extensions = [os.path.splitext(el.attrib['src'])[1] for el in doc('video source')]
assert extensions == ['.webm', '.ogv', '.mp4']
def test_prefix(self):
# Prefix should be applied to all videos.
doc = pq(self._render(
"{{ video('meh.mp4', 'meh.ogv', prefix='http://example.com/blah/') }}")
)
assert [el.attrib['src'] for el in doc('video source')] == [
'http://example.com/blah/meh.ogv',
'http://example.com/blah/meh.mp4',
]
def test_fileformats(self):
# URLs ending in strange extensions are ignored.
videos = [self.nomoz_video % ext for ext in
('ogv', 'exe', 'webm', 'txt')]
videos.append('http://example.net/noextension')
doc = pq(self._render("{{ video%s }}" % (str(tuple(videos)))))
assert doc('video source').length == 2
extensions = [os.path.splitext(el.attrib['src'])[1] for el in doc('video source')]
assert extensions == ['.webm', '.ogv']
@override_settings(STATIC_URL='/media/')
@patch('bedrock.mozorg.templatetags.misc.find_static', return_value=True)
class TestPlatformImg(TestCase):
rf = RequestFactory()
def _render(self, url, optional_attributes=None):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ platform_img('{0}', {1}) }}}}".format(url, optional_attributes),
{'request': req})
def _render_l10n(self, url):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ l10n_img('{0}') }}}}".format(url),
{'request': req})
def test_platform_img_no_optional_attributes(self, find_static):
"""Should return expected markup without optional attributes"""
markup = self._render('test.png')
self.assertIn(u'data-src-windows="/media/test-windows.png"', markup)
self.assertIn(u'data-src-mac="/media/test-mac.png"', markup)
markup = self._render('img/test.png')
self.assertIn(u'data-src-windows="/media/img/test-windows.png"', markup)
self.assertIn(u'data-src-mac="/media/img/test-mac.png"', markup)
def test_platform_img_with_optional_attributes(self, find_static):
"""Should return expected markup with optional attributes"""
markup = self._render('test.png', {'data-test-attr': 'test'})
self.assertIn(u'data-test-attr="test"', markup)
def test_platform_img_with_high_res(self, find_static):
"""Should return expected markup with high resolution image attrs"""
markup = self._render('test.png', {'high-res': True})
self.assertIn(u'data-src-windows-high-res="/media/test-windows-high-res.png"', markup)
self.assertIn(u'data-src-mac-high-res="/media/test-mac-high-res.png"', markup)
self.assertIn(u'data-high-res="true"', markup)
markup = self._render('img/test.png', {'high-res': True})
self.assertIn(u'data-src-windows-high-res="/media/img/test-windows-high-res.png"', markup)
self.assertIn(u'data-src-mac-high-res="/media/img/test-mac-high-res.png"', markup)
self.assertIn(u'data-high-res="true"', markup)
def test_platform_img_with_l10n(self, find_static):
"""Should return expected markup with l10n image path"""
l10n_url_win = self._render_l10n('test-windows.png')
l10n_url_mac = self._render_l10n('test-mac.png')
markup = self._render('test.png', {'l10n': True})
self.assertIn(u'data-src-windows="' + l10n_url_win + '"', markup)
self.assertIn(u'data-src-mac="' + l10n_url_mac + '"', markup)
markup = self._render('/img/test.png', {'l10n': True})
self.assertIn(u'data-src-windows="' + l10n_url_win + '"', markup)
self.assertIn(u'data-src-mac="' + l10n_url_mac + '"', markup)
def test_platform_img_with_l10n_and_optional_attributes(self, find_static):
"""
Should return expected markup with l10n image path and optional
attributes
"""
l10n_url_win = self._render_l10n('test-windows.png')
l10n_url_mac = self._render_l10n('test-mac.png')
markup = self._render('test.png', {'l10n': True, 'data-test-attr': 'test'})
self.assertIn(u'data-src-windows="' + l10n_url_win + '"', markup)
self.assertIn(u'data-src-mac="' + l10n_url_mac + '"', markup)
self.assertIn(u'data-test-attr="test"', markup)
def test_platform_img_with_l10n_and_high_res(self, find_static):
"""
Should return expected markup with l10n image path and high resolution
attributes
"""
l10n_url_win = self._render_l10n('test-windows.png')
l10n_hr_url_win = misc.convert_to_high_res(l10n_url_win)
l10n_url_mac = self._render_l10n('test-mac.png')
l10n_hr_url_mac = misc.convert_to_high_res(l10n_url_mac)
markup = self._render('test.png', {'l10n': True, 'high-res': True})
self.assertIn(u'data-src-windows-high-res="' + l10n_hr_url_win + '"', markup)
self.assertIn(u'data-src-mac-high-res="' + l10n_hr_url_mac + '"', markup)
self.assertIn(u'data-high-res="true"', markup)
class TestPressBlogUrl(TestCase):
rf = RequestFactory()
def _render(self, locale):
req = self.rf.get('/')
req.locale = locale
return render("{{{{ press_blog_url() }}}}".format('/'),
{'request': req})
def test_press_blog_url_no_locale(self):
"""No locale, fallback to default press blog"""
assert self._render('') == 'https://blog.mozilla.org/press/'
def test_press_blog_url_english(self):
"""en-US locale, default press blog"""
assert self._render('en-US') == 'https://blog.mozilla.org/press/'
def test_press_blog_url_europe(self):
"""Major European locales have their own blog"""
assert self._render('es-ES') == 'https://blog.mozilla.org/press-es/'
assert self._render('fr') == 'https://blog.mozilla.org/press-fr/'
assert self._render('de') == 'https://blog.mozilla.org/press-de/'
assert self._render('pl') == 'https://blog.mozilla.org/press-pl/'
assert self._render('it') == 'https://blog.mozilla.org/press-it/'
assert self._render('en-GB') == 'https://blog.mozilla.org/press-uk/'
def test_press_blog_url_latam(self):
"""South American Spanishes use the es-ES blog"""
assert self._render('es-AR') == 'https://blog.mozilla.org/press-es/'
assert self._render('es-CL') == 'https://blog.mozilla.org/press-es/'
assert self._render('es-MX') == 'https://blog.mozilla.org/press-es/'
def test_press_blog_url_brazil(self):
"""Brazilian Portuguese has its own br blog"""
assert self._render('pt-BR') == 'https://blog.mozilla.org/press-br/'
def test_press_blog_url_other_locale(self):
"""No blog for locale, fallback to default press blog"""
assert self._render('oc') == 'https://blog.mozilla.org/press/'
@override_settings(
DONATE_LINK=TEST_DONATE_LINK,
DONATE_PARAMS=TEST_DONATE_PARAMS,
)
class TestDonateUrl(TestCase):
rf = RequestFactory()
def _render(self, locale, source=''):
req = self.rf.get('/')
req.locale = locale
return render("{{{{ donate_url('{0}') }}}}".format(source),
{'request': req})
def test_donate_url_no_locale(self):
"""No locale, fallback to default page"""
assert self._render('', 'mozillaorg_footer') == (
'https://donate.mozilla.org//'
'?presets=100,50,25,15&amount=50'
'&utm_source=mozilla.org&utm_medium=referral'
'&utm_content=mozillaorg_footer&currency=usd')
def test_donate_url_english(self):
"""en-US locale, default page"""
assert self._render('en-US', 'mozillaorg_footer') == (
'https://donate.mozilla.org/en-US/'
'?presets=100,50,25,15&amount=50'
'&utm_source=mozilla.org&utm_medium=referral'
'&utm_content=mozillaorg_footer&currency=usd')
def test_donate_url_spanish(self):
"""es-MX locale, a localized page"""
assert self._render('es-MX', 'mozillaorg_footer') == (
'https://donate.mozilla.org/es-MX/'
'?presets=100,50,25,15&amount=15'
'&utm_source=mozilla.org&utm_medium=referral'
'&utm_content=mozillaorg_footer&currency=eur')
def test_donate_url_other_locale(self):
"""No page for locale, fallback to default page"""
assert self._render('pt-PT', 'mozillaorg_footer') == (
'https://donate.mozilla.org/pt-PT/'
'?presets=100,50,25,15&amount=50'
'&utm_source=mozilla.org&utm_medium=referral'
'&utm_content=mozillaorg_footer&currency=usd')
@override_settings(FIREFOX_TWITTER_ACCOUNTS=TEST_FIREFOX_TWITTER_ACCOUNTS)
class TestFirefoxTwitterUrl(TestCase):
rf = RequestFactory()
def _render(self, locale):
req = self.rf.get('/')
req.locale = locale
return render('{{ firefox_twitter_url() }}', {'request': req})
def test_firefox_twitter_url_no_locale(self):
"""No locale, fallback to default account"""
assert self._render('') == 'https://twitter.com/firefox'
def test_firefox_twitter_url_english(self):
"""en-US locale, default account"""
assert self._render('en-US') == 'https://twitter.com/firefox'
def test_firefox_twitter_url_spanish(self):
"""es-ES locale, a local account"""
assert self._render('es-ES') == 'https://twitter.com/firefox_es'
def test_firefox_twitter_url_portuguese(self):
"""pt-BR locale, a local account"""
assert self._render('pt-BR') == 'https://twitter.com/firefoxbrasil'
def test_firefox_twitter_url_other_locale(self):
"""No account for locale, fallback to default account"""
assert self._render('es-AR') == 'https://twitter.com/firefox'
assert self._render('es-CL') == 'https://twitter.com/firefox'
assert self._render('es-MX') == 'https://twitter.com/firefox'
assert self._render('pt-PT') == 'https://twitter.com/firefox'
@override_settings(STATIC_URL='/media/')
class TestHighResImg(TestCase):
rf = RequestFactory()
def _render(self, url, optional_attributes=None):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ high_res_img('{0}', {1}) }}}}".format(url, optional_attributes),
{'request': req})
def _render_l10n(self, url):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ l10n_img('{0}') }}}}".format(url),
{'request': req})
def test_high_res_img_no_optional_attributes(self):
"""Should return expected markup without optional attributes"""
expected = (
u'<img class="" src="/media/img/test.png" '
u'srcset="/media/img/test-high-res.png 1.5x">')
markup = self._render('img/test.png')
self.assertEqual(markup, expected)
def test_high_res_img_with_optional_attributes(self):
"""Should return expected markup with optional attributes"""
markup = self._render('img/test.png', {'data-test-attr': 'test', 'class': 'logo'})
expected = (
u'<img class="logo" src="/media/img/test.png" '
u'srcset="/media/img/test-high-res.png 1.5x" '
u'data-test-attr="test">')
self.assertEqual(markup, expected)
def test_high_res_img_with_l10n(self):
"""Should return expected markup with l10n image path"""
l10n_url = self._render_l10n('test.png')
l10n_hr_url = misc.convert_to_high_res(l10n_url)
markup = self._render('test.png', {'l10n': True})
expected = (
u'<img class="" src="' + l10n_url + '" '
u'srcset="' + l10n_hr_url + ' 1.5x">')
self.assertEqual(markup, expected)
l10n_url = self._render_l10n('img/test.png')
l10n_hr_url = misc.convert_to_high_res(l10n_url)
markup = self._render('test.png', {'l10n': True})
expected = (
u'<img class="" src="' + l10n_url + '" '
u'srcset="' + l10n_hr_url + ' 1.5x">')
self.assertEqual(markup, expected)
def test_high_res_img_with_l10n_and_optional_attributes(self):
"""Should return expected markup with l10n image path"""
l10n_url = self._render_l10n('test.png')
l10n_hr_url = misc.convert_to_high_res(l10n_url)
markup = self._render('test.png', {'l10n': True, 'data-test-attr': 'test'})
expected = (
u'<img class="" src="' + l10n_url + '" '
u'srcset="' + l10n_hr_url + ' 1.5x" data-test-attr="test">')
self.assertEqual(markup, expected)
@override_settings(STATIC_URL='/media/')
class TestLazyImg(TestCase):
rf = RequestFactory()
def _render(self, image_url, placeholder_url, include_highres_image=False, optional_attributes=None):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ lazy_img('{0}', '{1}', {2}, {3}) }}}}".format(image_url, placeholder_url, include_highres_image, optional_attributes),
{'request': req})
def test_lazy_img(self):
"""Should return expected markup"""
markup = self._render(image_url='img/test.png', placeholder_url='img/placeholder.png',
include_highres_image=True, optional_attributes={'class': 'the-dude', 'alt': 'abides', 'width': '300'})
expected = (
u'<div class="lazy-image-container">'
u'<img class="the-dude" src="/media/img/placeholder.png" data-src="/media/img/test.png" '
u'data-srcset="/media/img/test-high-res.png 2x" alt="abides" width="300">'
u'<noscript><img class="the-dude" src="/media/img/test.png" '
u'data-srcset="/media/img/test-high-res.png 2x" alt="abides" width="300"></noscript>'
u'</div>')
self.assertEqual(markup, expected)
def test_lazy_img_no_highres_image(self):
"""Should return no highres image"""
markup = self._render(image_url='img/test.png', placeholder_url='img/placeholder.png')
self.assertIn(u'src="/media/img/placeholder.png"', markup)
self.assertIn(u'data-src="/media/img/test.png"', markup)
self.assertNotIn(u'data-srcset="/media/img/test-high-res.png 2x"', markup)
def test_lazy_img_no_optional_attributes(self):
"""Should return default class and alt values if no optional attributes are provided"""
markup = self._render(image_url='img/test.png', placeholder_url='img/placeholder.png')
self.assertIn(u'class="lazy-image"', markup)
self.assertIn(u'alt=""', markup)
def test_lazy_img_optional_attributes(self):
"""Should return expected optional attributes"""
markup = self._render(image_url='img/test.png', placeholder_url='img/placeholder.png',
optional_attributes={'class': 'the-dude', 'alt': 'abides', 'width': '300'})
self.assertNotIn(u'class="lazy-image"', markup)
self.assertIn(u'class="the-dude"', markup)
self.assertIn(u'alt="abides"', markup)
self.assertIn(u'width="300"', markup)
def test_lazy_img_external(self):
"""Should allow an external image and ignore include_highres_image"""
markup = self._render(image_url='https://www.test.com/test.png', placeholder_url='img/placeholder.png',
include_highres_image=True)
self.assertIn(u'src="/media/img/placeholder.png"', markup)<|fim▁hole|>
class TestAbsoluteURLFilter(TestCase):
rf = RequestFactory()
static_url_dev = '/static/'
static_url_prod = '//mozorg.cdn.mozilla.net/static/'
static_url_full = 'https://mozorg.cdn.mozilla.net/static/'
image_path = 'img/mozorg/mozilla-256.jpg'
inline_template = "{{ static('%s')|absolute_url }}" % image_path
block_template = "{% filter absolute_url %}{% block page_image %}" + \
"{{ static('%s') }}" % image_path + "{% endblock %}{% endfilter %}"
def _render(self, template):
return render(template, {'request': self.rf.get('/')})
@patch('django.contrib.staticfiles.storage.staticfiles_storage.base_url', static_url_dev)
def test_image_dev(self):
"""Should return a fully qualified URL including a protocol"""
expected = settings.CANONICAL_URL + self.static_url_dev + self.image_path
assert self._render(self.inline_template) == expected
assert self._render(self.block_template) == expected
@patch('django.contrib.staticfiles.storage.staticfiles_storage.base_url', static_url_prod)
def test_image_prod(self):
"""Should return a fully qualified URL including a protocol"""
expected = 'https:' + self.static_url_prod + self.image_path
assert self._render(self.inline_template) == expected
assert self._render(self.block_template) == expected
@override_settings(DEV=False)
def test_urls(self):
"""Should return a fully qualified URL including a protocol"""
expected = 'https://www.mozilla.org/en-US/firefox/new/'
assert misc.absolute_url('/en-US/firefox/new/') == expected
assert misc.absolute_url('//www.mozilla.org/en-US/firefox/new/') == expected
assert misc.absolute_url('https://www.mozilla.org/en-US/firefox/new/') == expected
class TestFirefoxIOSURL(TestCase):
rf = RequestFactory()
def _render(self, locale, ct_param=None):
req = self.rf.get('/')
req.locale = locale
if ct_param:
return render("{{ firefox_ios_url('%s') }}" % ct_param,
{'request': req})
return render("{{ firefox_ios_url() }}", {'request': req})
def test_firefox_ios_url_no_locale(self):
"""No locale, fallback to default URL"""
assert (
self._render('') == 'https://itunes.apple.com'
'/app/firefox-private-safe-browser/id989804926')
def test_firefox_ios_url_default(self):
"""should fallback to default URL"""
assert (
self._render('ar') == 'https://itunes.apple.com'
'/app/firefox-private-safe-browser/id989804926')
assert (
self._render('zu') == 'https://itunes.apple.com'
'/app/firefox-private-safe-browser/id989804926')
def test_firefox_ios_url_localized(self):
"""should return localized URL"""
assert (
self._render('en-US') == 'https://itunes.apple.com/us'
'/app/firefox-private-safe-browser/id989804926')
assert (
self._render('es-ES') == 'https://itunes.apple.com/es'
'/app/firefox-private-safe-browser/id989804926')
assert (
self._render('ja') == 'https://itunes.apple.com/jp'
'/app/firefox-private-safe-browser/id989804926')
def test_firefox_ios_url_param(self):
"""should return default or localized URL with ct param"""
assert self._render('', 'mozorg') == (
'https://itunes.apple.com'
'/app/firefox-private-safe-browser/id989804926?ct=mozorg')
assert self._render('en-US', 'mozorg') == (
'https://itunes.apple.com/us'
'/app/firefox-private-safe-browser/id989804926?ct=mozorg')
assert self._render('es-ES', 'mozorg') == (
'https://itunes.apple.com/es'
'/app/firefox-private-safe-browser/id989804926?ct=mozorg')
# from jingo
def test_f():
s = render('{{ "{0} : {z}"|f("a", z="b") }}')
assert s == 'a : b'
def test_f_unicode():
s = render('{{ "foo {0}"|f(bar) }}', {'bar': u'bar\xe9'})
assert s == u'foo bar\xe9'
s = render('{{ t|f(bar) }}', {'t': u'\xe9 {0}', 'bar': 'baz'})
assert s == u'\xe9 baz'
format_string = 'Hello <b>{0}</b>'
format_markup = Markup(format_string)
val_string = '<em>Steve</em>'
val_markup = Markup(val_string)
@pytest.mark.parametrize('f, v', [
(format_string, val_string),
(format_string, val_markup),
(format_markup, val_string),
(format_markup, val_markup),
])
def test_f_markup(f, v):
expect = 'Hello <b><em>Steve</em></b>'
s = render('{{ fmt|f(val) }}', {'fmt': f, 'val': v})
assert expect == s
def test_datetime():
time = datetime(2009, 12, 25, 10, 11, 12)
s = render('{{ d|datetime }}', {'d': time})
assert s == 'December 25, 2009'
s = render('{{ d|datetime("%Y-%m-%d %H:%M:%S") }}', {'d': time})
assert s == '2009-12-25 10:11:12'
s = render('{{ None|datetime }}')
assert s == ''
def test_datetime_unicode():
fmt = u"%Y 年 %m 月 %e 日"
misc.datetime(datetime.now(), fmt)
def test_ifeq():
eq_context = {'a': 1, 'b': 1}
neq_context = {'a': 1, 'b': 2}
s = render('{{ a|ifeq(b, "<b>something</b>") }}', eq_context)
assert s == '<b>something</b>'
s = render('{{ a|ifeq(b, "<b>something</b>") }}', neq_context)
assert s == ''
def test_csrf():
s = render('{{ csrf() }}', {'csrf_token': 'fffuuu'})
csrf = '<input type="hidden" name="csrfmiddlewaretoken" value="fffuuu">'
assert csrf in s
class TestAppStoreURL(TestCase):
rf = RequestFactory()
def _render(self, locale):
req = self.rf.get('/')
req.locale = locale
product = 'lockwise'
return render("{{ app_store_url('%s') }}" % product,
{'request': req})
def test_app_store_url_no_locale(self):
"""No locale, fallback to default URL"""
assert (self._render('') == 'https://itunes.apple.com/app/id1314000270?mt=8')
def test_app_store_url_default(self):
"""should fallback to default URL"""
assert (self._render('ar') == 'https://itunes.apple.com/app/id1314000270?mt=8')
assert (self._render('zu') == 'https://itunes.apple.com/app/id1314000270?mt=8')
def test_app_store_url_localized(self):
"""should return localized URL"""
assert (self._render('en-US') == 'https://itunes.apple.com/us/app/id1314000270?mt=8')
assert (self._render('es-ES') == 'https://itunes.apple.com/es/app/id1314000270?mt=8')
assert (self._render('de') == 'https://itunes.apple.com/de/app/id1314000270?mt=8')
class TestPlayStoreURL(TestCase):
rf = RequestFactory()
def _render(self, locale):
req = self.rf.get('/')
req.locale = locale
product = 'lockwise'
return render("{{ play_store_url('%s') }}" % product,
{'request': req})
def test_play_store_url_localized(self):
"""should return localized URL"""
assert (self._render('en-US') == 'https://play.google.com/store/apps/details?id=mozilla.lockbox&hl=en')
assert (self._render('es-ES') == 'https://play.google.com/store/apps/details?id=mozilla.lockbox&hl=es')
assert (self._render('de') == 'https://play.google.com/store/apps/details?id=mozilla.lockbox&hl=de')
class TestStructuredDataID(TestCase):
rf = RequestFactory()
def _render(self, locale, domain=None):
req = self.rf.get('/')
req.locale = locale
sd_id = 'firefoxbrowser'
if domain:
return render("{{{{ structured_data_id('{0}', '{1}') }}}}".format(
sd_id, domain), {'request': req})
return render("{{ structured_data_id('%s') }}" % sd_id,
{'request': req})
def test_structured_data_localized_id(self):
"""should return localized id"""
assert (self._render('en-US') == 'https://www.mozilla.org/#firefoxbrowser')
assert (self._render('es-ES') == 'https://www.mozilla.org/#firefoxbrowser-es-es')
assert (self._render('de') == 'https://www.mozilla.org/#firefoxbrowser-de')
def test_structured_data_custom_domain_id(self):
"""should return id for a custom domain"""
domain = 'https://foundation.mozilla.org'
assert (self._render('en-US', domain) == 'https://foundation.mozilla.org/#firefoxbrowser')
assert (self._render('es-ES', domain) == 'https://foundation.mozilla.org/#firefoxbrowser-es-es')
assert (self._render('de', domain) == 'https://foundation.mozilla.org/#firefoxbrowser-de')
class TestLangShort(TestCase):
rf = RequestFactory()
def _render(self, locale, domain=None):
req = self.rf.get('/')
req.locale = locale
return render("{{ lang_short() }}", {'request': req})
def test_shortened_locales(self):
"""should return a shortened locale code"""
assert (self._render('en-US') == 'en')
assert (self._render('es-ES') == 'es')
assert (self._render('de') == 'de')
class TestFirefoxAdjustUrl(TestCase):
rf = RequestFactory()
def _render(self, locale, redirect, adgroup, creative=None):
req = self.rf.get('/')
req.locale = locale
if creative:
return render("{{{{ firefox_adjust_url('{0}', '{1}', '{2}') }}}}".format(
redirect, adgroup, creative), {'request': req})
return render("{{{{ firefox_adjust_url('{0}', '{1}') }}}}".format(
redirect, adgroup), {'request': req})
def test_firefox_ios_adjust_url(self):
"""Firefox for mobile with an App Store URL redirect"""
assert (
self._render('en-US', 'ios', 'test-page') == 'https://app.adjust.com/2uo1qc?redirect='
'https%3A%2F%2Fitunes.apple.com%2Fus%2Fapp%2Ffirefox-private-safe-browser%2Fid989804926'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_firefox_ios_adjust_url_creative(self):
"""Firefox for mobile with an App Store URL redirect and creative param"""
assert (
self._render('de', 'ios', 'test-page', 'experiment-name') == 'https://app.adjust.com/2uo1qc?redirect='
'https%3A%2F%2Fitunes.apple.com%2Fde%2Fapp%2Ffirefox-private-safe-browser%2Fid989804926'
'&campaign=www.mozilla.org&adgroup=test-page&creative=experiment-name')
def test_firefox_android_adjust_url(self):
"""Firefox for mobile with a Play Store redirect"""
assert (
self._render('en-US', 'android', 'test-page') == 'https://app.adjust.com/2uo1qc?redirect='
'https%3A%2F%2Fplay.google.com%2Fstore%2Fapps%2Fdetails%3Fid%3Dorg.mozilla.firefox'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_firefox_no_redirect_adjust_url(self):
"""Firefox for mobile with no redirect"""
assert (
self._render('en-US', None, 'test-page') == 'https://app.adjust.com/2uo1qc?'
'campaign=www.mozilla.org&adgroup=test-page')
class TestFocusAdjustUrl(TestCase):
rf = RequestFactory()
def _render(self, locale, redirect, adgroup, creative=None):
req = self.rf.get('/')
req.locale = locale
if creative:
return render("{{{{ focus_adjust_url('{0}', '{1}', '{2}') }}}}".format(
redirect, adgroup, creative), {'request': req})
return render("{{{{ focus_adjust_url('{0}', '{1}') }}}}".format(
redirect, adgroup), {'request': req})
def test_focus_ios_adjust_url(self):
"""Firefox Focus with an App Store URL redirect"""
assert (
self._render('en-US', 'ios', 'test-page') == 'https://app.adjust.com/b8s7qo?redirect='
'https%3A%2F%2Fitunes.apple.com%2Fus%2Fapp%2Ffirefox-focus-privacy-browser%2Fid1055677337'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_focus_ios_adjust_url_creative(self):
"""Firefox Focus with an App Store URL redirect and creative param"""
assert (
self._render('fr', 'ios', 'test-page', 'experiment-name') == 'https://app.adjust.com/b8s7qo?'
'redirect=https%3A%2F%2Fitunes.apple.com%2Ffr%2Fapp%2Ffirefox-focus-privacy-browser%2Fid1055677337'
'&campaign=www.mozilla.org&adgroup=test-page&creative=experiment-name')
def test_focus_android_adjust_url(self):
"""Firefox Focus for mobile with a Play Store redirect"""
assert (
self._render('en-US', 'android', 'test-page') == 'https://app.adjust.com/b8s7qo?redirect='
'https%3A%2F%2Fplay.google.com%2Fstore%2Fapps%2Fdetails%3Fid%3Dorg.mozilla.focus'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_focus_no_redirect_adjust_url(self):
"""Firefox Focus for mobile with no redirect"""
assert (
self._render('en-US', None, 'test-page') == 'https://app.adjust.com/b8s7qo?'
'campaign=www.mozilla.org&adgroup=test-page')
def test_klar_ios_adjust_url(self):
"""Firefox Klar with an App Store URL redirect"""
assert (
self._render('de', 'ios', 'test-page') == 'https://app.adjust.com/jfcx5x?redirect='
'https%3A%2F%2Fitunes.apple.com%2Fde%2Fapp%2Fklar-by-firefox%2Fid1073435754'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_klar_android_adjust_url(self):
"""Firefox Klar for mobile with a Play Store redirect"""
assert (
self._render('de', 'android', 'test-page') == 'https://app.adjust.com/jfcx5x?redirect='
'https%3A%2F%2Fplay.google.com%2Fstore%2Fapps%2Fdetails%3Fid%3Dorg.mozilla.klar'
'&campaign=www.mozilla.org&adgroup=test-page')
class TestLockwiseAdjustUrl(TestCase):
rf = RequestFactory()
def _render(self, locale, redirect, adgroup, creative=None):
req = self.rf.get('/')
req.locale = locale
if creative:
return render("{{{{ lockwise_adjust_url('{0}', '{1}', '{2}') }}}}".format(
redirect, adgroup, creative), {'request': req})
return render("{{{{ lockwise_adjust_url('{0}', '{1}') }}}}".format(
redirect, adgroup), {'request': req})
def test_lockwise_ios_adjust_url(self):
"""Firefox Lockwise for mobile with an App Store URL redirect"""
assert (
self._render('en-US', 'ios', 'test-page') == 'https://app.adjust.com/6tteyjo?redirect='
'https%3A%2F%2Fitunes.apple.com%2Fus%2Fapp%2Fid1314000270%3Fmt%3D8'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_lockwise_ios_adjust_url_creative(self):
"""Firefox Lockwise for mobile with an App Store URL redirect and creative param"""
assert (
self._render('de', 'ios', 'test-page', 'experiment-name') == 'https://app.adjust.com/6tteyjo'
'?redirect=https%3A%2F%2Fitunes.apple.com%2Fde%2Fapp%2Fid1314000270%3Fmt%3D8'
'&campaign=www.mozilla.org&adgroup=test-page&creative=experiment-name')
def test_lockwise_android_adjust_url(self):
"""Firefox Lockwise for mobile with a Play Store redirect"""
assert (
self._render('en-US', 'android', 'test-page') == 'https://app.adjust.com/6tteyjo?redirect='
'https%3A%2F%2Fplay.google.com%2Fstore%2Fapps%2Fdetails%3Fid%3Dmozilla.lockbox'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_lockwise_no_redirect_adjust_url(self):
"""Firefox Lockwise for mobile with no redirect"""
assert (
self._render('en-US', None, 'test-page') == 'https://app.adjust.com/6tteyjo'
'?campaign=www.mozilla.org&adgroup=test-page')
class TestPocketAdjustUrl(TestCase):
rf = RequestFactory()
def _render(self, locale, redirect, adgroup, creative=None):
req = self.rf.get('/')
req.locale = locale
if creative:
return render("{{{{ pocket_adjust_url('{0}', '{1}', '{2}') }}}}".format(
redirect, adgroup, creative), {'request': req})
return render("{{{{ pocket_adjust_url('{0}', '{1}') }}}}".format(
redirect, adgroup), {'request': req})
def test_pocket_ios_adjust_url(self):
"""Pocket for mobile with an App Store URL redirect"""
assert (
self._render('en-US', 'ios', 'test-page') == 'https://app.adjust.com/m54twk?redirect='
'https%3A%2F%2Fitunes.apple.com%2Fus%2Fapp%2Fpocket-save-read-grow%2Fid309601447'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_pocket_ios_adjust_url_creative(self):
"""Pocket for mobile with an App Store URL redirect and creative param"""
assert (
self._render('de', 'ios', 'test-page', 'experiment-name') == 'https://app.adjust.com/m54twk?redirect='
'https%3A%2F%2Fitunes.apple.com%2Fde%2Fapp%2Fpocket-save-read-grow%2Fid309601447'
'&campaign=www.mozilla.org&adgroup=test-page&creative=experiment-name')
def test_pocket_android_adjust_url(self):
"""Pocket for mobile with a Play Store redirect"""
assert (
self._render('en-US', 'android', 'test-page') == 'https://app.adjust.com/m54twk?redirect='
'https%3A%2F%2Fplay.google.com%2Fstore%2Fapps%2Fdetails%3Fid%3Dcom.ideashower.readitlater.pro'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_pocket_no_redirect_adjust_url(self):
"""Pocket for mobile with no redirect"""
assert (
self._render('en-US', None, 'test-page') == 'https://app.adjust.com/m54twk?'
'campaign=www.mozilla.org&adgroup=test-page')
@override_settings(FXA_ENDPOINT=TEST_FXA_ENDPOINT)
class TestPocketFxAButton(TestCase):
rf = RequestFactory()
def _render(self, entrypoint, button_text, class_name=None, is_button_class=True, include_metrics=True,
optional_parameters=None, optional_attributes=None):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ pocket_fxa_button('{0}', '{1}', '{2}', {3}, {4}, {5}, {6}) }}}}".format(
entrypoint, button_text, class_name, is_button_class, include_metrics,
optional_parameters, optional_attributes), {'request': req})
def test_pocket_fxa_button(self):
"""Should return expected markup"""
markup = self._render(entrypoint='mozilla.org-firefox-pocket', button_text='Try Pocket Now',
class_name='pocket-main-cta-button', is_button_class=True, include_metrics=True,
optional_parameters={'s': 'ffpocket', 'foo': 'bar'},
optional_attributes={'data-cta-text': 'Try Pocket Now', 'data-cta-type': 'activate pocket',
'data-cta-position': 'primary'})
expected = (
u'<a href="https://getpocket.com/ff_signup?entrypoint=mozilla.org-firefox-pocket&form_type=button'
u'&utm_source=mozilla.org-firefox-pocket&utm_medium=referral&s=ffpocket&foo=bar" data-action="https://accounts.firefox.com/" '
u'class="js-fxa-cta-link js-fxa-product-button mzp-c-button mzp-t-product pocket-main-cta-button" '
u'data-cta-text="Try Pocket Now" data-cta-type="activate pocket" data-cta-position="primary">Try Pocket Now</a>')
self.assertEqual(markup, expected)
@override_settings(FXA_ENDPOINT=TEST_FXA_ENDPOINT)
class TestMonitorFxAButton(TestCase):
rf = RequestFactory()
def _render(self, entrypoint, button_text, class_name=None, is_button_class=False, include_metrics=True,
optional_parameters=None, optional_attributes=None):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ monitor_fxa_button('{0}', '{1}', '{2}', {3}, {4}, {5}, {6}) }}}}".format(
entrypoint, button_text, class_name, is_button_class, include_metrics,
optional_parameters, optional_attributes), {'request': req})
def test_monitor_fxa_button(self):
"""Should return expected markup"""
markup = self._render(entrypoint='mozilla.org-firefox-accounts', button_text='Sign In to Monitor',
class_name='monitor-main-cta-button', is_button_class=False, include_metrics=True,
optional_parameters={'utm_campaign': 'skyline'},
optional_attributes={'data-cta-text': 'Sign In to Monitor', 'data-cta-type':
'fxa-monitor', 'data-cta-position': 'primary'})
expected = (
u'<a href="https://monitor.firefox.com/oauth/init?entrypoint=mozilla.org-firefox-accounts&form_type=button'
u'&utm_source=mozilla.org-firefox-accounts&utm_medium=referral&utm_campaign=skyline" '
u'data-action="https://accounts.firefox.com/" class="js-fxa-cta-link js-fxa-product-button '
u'monitor-main-cta-button" data-cta-text="Sign In to Monitor" data-cta-type="fxa-monitor" '
u'data-cta-position="primary">Sign In to Monitor</a>')
self.assertEqual(markup, expected)
@override_settings(FXA_ENDPOINT=TEST_FXA_ENDPOINT)
@override_settings(FXA_ENDPOINT_MOZILLAONLINE=TEST_FXA_MOZILLAONLINE_ENDPOINT)
class TestFxAButton(TestCase):
rf = RequestFactory()
def _render(self, entrypoint, button_text, action='signup', class_name=None, is_button_class=True, include_metrics=True,
optional_parameters=None, optional_attributes=None):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ fxa_button('{0}', '{1}', '{2}', '{3}', {4}, {5}, {6}, {7}) }}}}".format(
entrypoint, button_text, action, class_name, is_button_class, include_metrics,
optional_parameters, optional_attributes), {'request': req})
def test_fxa_button_signup(self):
"""Should return expected markup"""
markup = self._render(entrypoint='mozilla.org-firefox-whatsnew73', button_text='Sign Up', action='signup',
class_name='fxa-main-cta-button', is_button_class=True, include_metrics=True,
optional_parameters={'utm_campaign': 'whatsnew73'},
optional_attributes={'data-cta-text': 'Sign Up', 'data-cta-type':
'fxa-sync', 'data-cta-position': 'primary'})
expected = (
u'<a href="https://accounts.firefox.com/signup?entrypoint=mozilla.org-firefox-whatsnew73&form_type=button'
u'&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-action="https://accounts.firefox.com/" class="js-fxa-cta-link js-fxa-product-button mzp-c-button mzp-t-product '
u'fxa-main-cta-button" data-cta-text="Sign Up" data-cta-type="fxa-sync" data-cta-position="primary" '
u'data-mozillaonline-link="https://accounts.firefox.com.cn/signup?entrypoint=mozilla.org-firefox-whatsnew73'
u'&form_type=button&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-mozillaonline-action="https://accounts.firefox.com.cn/">Sign Up</a>')
self.assertEqual(markup, expected)
def test_fxa_button_signin(self):
"""Should return expected markup"""
markup = self._render(entrypoint='mozilla.org-firefox-whatsnew73', button_text='Sign In', action='signin',
class_name='fxa-main-cta-button', is_button_class=True, include_metrics=True,
optional_parameters={'utm_campaign': 'whatsnew73'},
optional_attributes={'data-cta-text': 'Sign In', 'data-cta-type':
'fxa-sync', 'data-cta-position': 'primary'})
expected = (
u'<a href="https://accounts.firefox.com/signin?entrypoint=mozilla.org-firefox-whatsnew73&form_type=button'
u'&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-action="https://accounts.firefox.com/" class="js-fxa-cta-link js-fxa-product-button mzp-c-button mzp-t-product '
u'fxa-main-cta-button" data-cta-text="Sign In" data-cta-type="fxa-sync" data-cta-position="primary" '
u'data-mozillaonline-link="https://accounts.firefox.com.cn/signin?entrypoint=mozilla.org-firefox-whatsnew73'
u'&form_type=button&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-mozillaonline-action="https://accounts.firefox.com.cn/">Sign In</a>')
self.assertEqual(markup, expected)
def test_fxa_button_email(self):
"""Should return expected markup"""
markup = self._render(entrypoint='mozilla.org-firefox-whatsnew73', button_text='Sign Up', action='email',
class_name='fxa-main-cta-button', is_button_class=True, include_metrics=True,
optional_parameters={'utm_campaign': 'whatsnew73'},
optional_attributes={'data-cta-text': 'Sign Up', 'data-cta-type':
'fxa-sync', 'data-cta-position': 'primary'})
expected = (
u'<a href="https://accounts.firefox.com/?action=email&entrypoint=mozilla.org-firefox-whatsnew73&form_type=button'
u'&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-action="https://accounts.firefox.com/" class="js-fxa-cta-link js-fxa-product-button mzp-c-button mzp-t-product '
u'fxa-main-cta-button" data-cta-text="Sign Up" data-cta-type="fxa-sync" data-cta-position="primary" '
u'data-mozillaonline-link="https://accounts.firefox.com.cn/?action=email&entrypoint=mozilla.org-firefox-whatsnew73'
u'&form_type=button&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-mozillaonline-action="https://accounts.firefox.com.cn/">Sign Up</a>')
self.assertEqual(markup, expected)
@override_settings(FXA_ENDPOINT=TEST_FXA_ENDPOINT)
@override_settings(FXA_ENDPOINT_MOZILLAONLINE=TEST_FXA_MOZILLAONLINE_ENDPOINT)
class TestFxALinkFragment(TestCase):
rf = RequestFactory()
def _render(self, entrypoint, action='signup', optional_parameters=None):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ fxa_link_fragment('{0}', '{1}', {2}) }}}}".format(
entrypoint, action, optional_parameters), {'request': req})
def test_fxa_button_signup(self):
"""Should return expected markup"""
markup = self._render(entrypoint='mozilla.org-firefox-whatsnew73', action='signup',
optional_parameters={'utm_campaign': 'whatsnew73'})
expected = (
u'href="https://accounts.firefox.com/signup?entrypoint=mozilla.org-firefox-whatsnew73&form_type=button'
u'&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-mozillaonline-link="https://accounts.firefox.com.cn/signup?entrypoint=mozilla.org-firefox-whatsnew73'
u'&form_type=button&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-mozillaonline-action="https://accounts.firefox.com.cn/"')
self.assertEqual(markup, expected)<|fim▁end|> | self.assertIn(u'data-src="https://www.test.com/test.png"', markup)
self.assertNotIn(u'data-srcset="', markup) |
<|file_name|>lzwhutf16-min.js<|end_file_name|><|fim▁begin|>// Copyright © 2016 Gary W. Hudson Esq.
// Released under GNU GPL 3.0
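// Minified LZW-style string codec: Encode emits variable-width dictionary indices bit by bit,
// packing 15 bits per output character (char code offset by 32); Decode reverses the stream.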
var lzwh = (function() {var z={
Decode:function(p)
{function f(){--h?k>>=1:(k=p.charCodeAt(q++)-32,h=15);return k&1}var h=1,q=0,k=0,e=[""],l=[],g=0,m=0,c="",d,a=0,n,b;
do{m&&(e[g-1]=c.charAt(0));m=a;l.push(c);d=0;for(a=g++;d!=a;)f()?d=(d+a>>1)+1:a=d+a>>1;if(d)c=l[d]+e[d],e[g]=c.charAt(0)
else{b=1;do for(n=8;n--;b*=2)d+=b*f();while(f());d&&(c=String.fromCharCode(d-1),e[g]="")}}while(d);return l.join("")},
Encode:function(p)
{function f(b){b&&(k|=e);16384==e?(q.push(String.fromCharCode(k+32)),e=1,k=0):e<<=1}function h(b,d){for(var a=0,e,c=l++;a!=c;)
e=a+c>>1,b>e?(a=e+1,f(1)):(c=e,f(0));if(!a){-1!=b&&(a=d+1);do{for(c=8;c--;a=(a-a%2)/2)f(a%2);f(a)}while(a)}}for(var q=[],k=0,
e=1,l=0,g=[],m=[],c=0,d=p.length,a,n,b=0;c<d;)a=p.charCodeAt(c++),g[b]?(n=g[b].indexOf(a),-1==n?(g[b].push(a),m[b].push(l+1),
<|fim▁hole|>c-=b?1:0,h(b,a),b=0):b=m[b][n]):(g[b]=[a],m[b]=[l+1],c-=b?1:0,h(b,a),b=0);b&&h(b,0);for(h(-1,0);1!=e;)f(0);return q.join("")}
};return z})();
if(typeof define==='function'&&define.amd)define(function(){return lzw})
else if(typeof module!=='undefined'&&module!=null)module.exports=lzw;<|fim▁end|> | |
<|file_name|>AutoGridNoWrap.tsx<|end_file_name|><|fim▁begin|>import React from 'react';
import Paper from '@material-ui/core/Paper';
import { makeStyles, createStyles, Theme } from '@material-ui/core/styles';
import Grid from '@material-ui/core/Grid';
import Avatar from '@material-ui/core/Avatar';
import Typography from '@material-ui/core/Typography';
const useStyles = makeStyles((theme: Theme) =>
createStyles({
root: {
flexGrow: 1,
overflow: 'hidden',
padding: theme.spacing(0, 3),
},
paper: {
maxWidth: 400,
margin: `${theme.spacing(1)}px auto`,
padding: theme.spacing(2),
},
}),
);
const message = `Truncation should be conditionally applicable on this long line of text
as this is a much longer line than what the container can support. `;
export default function AutoGridNoWrap() {
const classes = useStyles();
return (
<div className={classes.root}>
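      {/* Three variants: noWrap with zeroMinWidth truncates, noWrap without zeroMinWidth overflows, and the default Typography wraps. */}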
<Paper className={classes.paper}>
<Grid container wrap="nowrap" spacing={2}>
<Grid item>
<Avatar>W</Avatar>
</Grid>
<Grid item xs zeroMinWidth>
<Typography noWrap>{message}</Typography>
</Grid>
</Grid>
</Paper>
<Paper className={classes.paper}><|fim▁hole|> </Grid>
<Grid item xs>
<Typography noWrap>{message}</Typography>
</Grid>
</Grid>
</Paper>
<Paper className={classes.paper}>
<Grid container wrap="nowrap" spacing={2}>
<Grid item>
<Avatar>W</Avatar>
</Grid>
<Grid item xs>
<Typography>{message}</Typography>
</Grid>
</Grid>
</Paper>
</div>
);
}<|fim▁end|> | <Grid container wrap="nowrap" spacing={2}>
<Grid item>
<Avatar>W</Avatar> |
<|file_name|>post_import_image.py<|end_file_name|><|fim▁begin|>"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import json
import os
from osbs.api import OSBS
from osbs.conf import Configuration
from atomic_reactor.plugin import PostBuildPlugin
from atomic_reactor.util import ImageName
class ImportImagePlugin(PostBuildPlugin):
"""
Import image tags from external docker registry into OpenShift.
"""
key = "import_image"
can_fail = False
def __init__(self, tasker, workflow, url, verify_ssl=True, use_auth=True):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:param url: str, URL to OSv3 instance
:param verify_ssl: bool, verify SSL certificate?
:param use_auth: bool, initiate authentication with openshift?
"""
# call parent constructor
super(ImportImagePlugin, self).__init__(tasker, workflow)
self.url = url
self.verify_ssl = verify_ssl
self.use_auth = use_auth
def run(self):
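        # The OpenShift build container injects the Build object as JSON via the BUILD env variable.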
try:
build_json = json.loads(os.environ["BUILD"])
except KeyError:
self.log.error("No $BUILD env variable. "
"Probably not running in build container.")
raise
osbs_conf = Configuration(conf_file=None, openshift_uri=self.url,
use_auth=self.use_auth,
verify_ssl=self.verify_ssl)
osbs = OSBS(osbs_conf, osbs_conf)
metadata = build_json.get("metadata", {})<|fim▁hole|> labels = metadata.get("labels", {})
try:
imagestream = labels["imagestream"]
except KeyError:
self.log.error("No imagestream label set for this Build")
raise
self.log.info("Importing tags for %s", imagestream)
osbs.import_image(imagestream, **kwargs)<|fim▁end|> | kwargs = {}
if 'namespace' in metadata:
kwargs['namespace'] = metadata['namespace']
|
<|file_name|>main.cpp<|end_file_name|><|fim▁begin|>/****************************************************************************
**
** Copyright (C) 2015 The Qt Company Ltd.
** Contact: http://www.qt.io/licensing/
**
** This file is part of the documentation of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:BSD$
** You may use this file under the terms of the BSD license as follows:
**
** "Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are
** met:
** * Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** * Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in
** the documentation and/or other materials provided with the
** distribution.
** * Neither the name of The Qt Company Ltd nor the names of its
** contributors may be used to endorse or promote products derived<|fim▁hole|>** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
** OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
** LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
** OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
**
** $QT_END_LICENSE$
**
****************************************************************************/
#include "piechart.h"
#include "pieslice.h"
#include <qdeclarative.h>
#include <QDeclarativeView>
#include <QApplication>
int main(int argc, char *argv[])
{
QApplication app(argc, argv);
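    // Register the C++ chart types so QML can use them via "import Charts 1.0".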
qmlRegisterType<PieChart>("Charts", 1, 0, "PieChart");
qmlRegisterType<PieSlice>("Charts", 1, 0, "PieSlice");
QDeclarativeView view;
view.setSource(QUrl::fromLocalFile("app.qml"));
view.show();
return app.exec();
}<|fim▁end|> | ** from this software without specific prior written permission.
**
** |
<|file_name|>bootstrap.py<|end_file_name|><|fim▁begin|># Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deferred tasks for bootstrapping the GnG app."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import functools
import inspect
import logging
import os
import sys
from distutils import version
from google.appengine.ext import deferred
from loaner.web_app import constants
from loaner.web_app.backend.clients import bigquery
from loaner.web_app.backend.clients import directory
from loaner.web_app.backend.lib import datastore_yaml
from loaner.web_app.backend.lib import user
from loaner.web_app.backend.lib import utils
from loaner.web_app.backend.models import bootstrap_status_model
from loaner.web_app.backend.models import config_model
_ORG_UNIT_EXISTS_MSG = 'Org unit %s already exists, so cannot create.'
_TASK_DESCRIPTIONS = {
'bootstrap_datastore_yaml': 'Importing datastore YAML file',
'bootstrap_chrome_ous': 'Creating Chrome OUs in Directory',
'bootstrap_bq_history': 'Configuring datastore history tables in BigQuery',
'bootstrap_load_config_yaml': 'Loading config_defaults.yaml into datastore.'
}
# Tasks that should only be run for a new deployment, i.e. they are destructive.
_BOOTSTRAP_INIT_TASKS = (
'bootstrap_datastore_yaml',
'bootstrap_load_config_yaml'
)
# Tasks that should be run for an update or can rerun, i.e. they are idempotent.
_BOOTSTRAP_UPDATE_TASKS = tuple(
set(_TASK_DESCRIPTIONS.keys()) - set(_BOOTSTRAP_INIT_TASKS)
)
class Error(Exception):
"""Exception raised when master method called but ENABLE_BOOTSTRAP False."""
def managed_task(task_function):
"""Decorator to manage task methods.
This records the status of the task in an entity and raises the
deferred.PermanentTaskFailure exception to prevent tasks from repeating upon
failure. In such cases, the exception message is recorded to the entity.
Args:
task_function: function, to be managed by the decorator.
Returns:
Wrapped function.
Raises:
deferred.PermanentTaskFailure: if anything at all goes wrong.
"""
@functools.wraps(task_function)
def wrapper(*args, **kwargs):
"""Wrapper for managed task decorator."""
status_entity = bootstrap_status_model.BootstrapStatus.get_or_insert(
task_function.__name__)
status_entity.description = _TASK_DESCRIPTIONS.get(
task_function.__name__, task_function.__name__)
status_entity.timestamp = datetime.datetime.utcnow()
try:
task_function(*args, **kwargs)
status_entity.success = True
status_entity.details = None
status_entity.put()
except Exception as e:
status_entity.success = False
status_entity.details = '{} {}'.format(str(type(e)), str(e))
status_entity.put()
raise deferred.PermanentTaskFailure(
'Task {} failed; error: {}'.format(
task_function.__name__, status_entity.details))
return wrapper
@managed_task
def bootstrap_datastore_yaml(wipe=True, **kwargs):
"""Bootstraps arbitrary datastore entities from supplied YAML input.
Args:
wipe: bool, whether to wipe all existing datastore models for any model
contained in the YAML.
**kwargs: keyword args including a user_email with which to run the
datastore methods (required for BigQuery streaming).
"""
with open(
os.path.join(os.path.dirname(__file__), 'bootstrap.yaml')) as yaml_file:
datastore_yaml.import_yaml(yaml_file.read(), kwargs['user_email'], wipe)
@managed_task
def bootstrap_chrome_ous(**kwargs):
"""Bootstraps Chrome device OUs.
Args:
**kwargs: keyword args including a user_email with which to run the
Directory API client methods (required for BigQuery streaming).
"""
logging.info('Requesting delegated admin for bootstrap')
client = directory.DirectoryApiClient(user_email=kwargs['user_email'])
for org_unit_name, org_unit_path in constants.ORG_UNIT_DICT.iteritems():
logging.info(
'Creating org unit %s at path %s ...', org_unit_name, org_unit_path)
if client.get_org_unit(org_unit_path):
logging.warn(_ORG_UNIT_EXISTS_MSG, org_unit_name)
else:
client.insert_org_unit(org_unit_path)
@managed_task
def bootstrap_bq_history(**kwargs):
"""Bootstraps BigQuery history tables for archival purposes.
Args:
**kwargs: keyword args including a user_email with which to run the
Directory API client methods (required for BigQuery streaming).
"""
del kwargs # Unused, but comes by default.
client = bigquery.BigQueryClient()
client.initialize_tables()
@managed_task
def bootstrap_load_config_yaml(**kwargs):
"""Loads config_defaults.yaml into datastore.
Args:
**kwargs: Unused, but required for bootstrap tasks.
"""
del kwargs # Unused, but comes by default.
config_defaults = utils.load_config_from_yaml()
for name, value in config_defaults.iteritems():
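    # Keep the current bootstrap_started value rather than overwriting it with the YAML default.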
if name == 'bootstrap_started':
config_model.Config.set(name, config_model.Config.get(name), False)
else:
config_model.Config.set(name, value, False)
def get_bootstrap_functions(get_all=False):
"""Gets all functions necessary for bootstrap.
This function collects only the functions necessary for the bootstrap
process. Specifically, it will collect tasks specific to a new or existing
deployment (an update). Additionally, it will collect any failed tasks so that
they can be attempted again.
Args:
get_all: bool, return all bootstrap tasks, defaults to False.
Returns:
Dict, all functions necessary for bootstrap.
"""
module_functions = inspect.getmembers(
sys.modules[__name__], inspect.isfunction)
bootstrap_functions = {
key: value
for key, value in dict(module_functions)
.iteritems() if key.startswith('bootstrap_')
}
if get_all or _is_new_deployment():
return bootstrap_functions
if is_update():
bootstrap_functions = {
key: value for key, value in bootstrap_functions.iteritems()
if key in _BOOTSTRAP_UPDATE_TASKS
}
else: # Collect all bootstrap functions that failed and all update tasks.
for function_name in bootstrap_functions.keys():
status_entity = bootstrap_status_model.BootstrapStatus.get_by_id(
function_name)
if (status_entity and
status_entity.success and
function_name not in _BOOTSTRAP_UPDATE_TASKS):
del bootstrap_functions[function_name]
return bootstrap_functions
def _run_function_as_task(all_functions_list, function_name, kwargs=None):
"""Runs a specific function and its kwargs as an AppEngine task.<|fim▁hole|> Args:
all_functions_list: string list, A list with all function names that are
registered as bootstrap functions on the Loaner app.
function_name: string, A specific function that should be ran as a task.
kwargs: dict, Optional kwargs to be passed to the function that will run.
Returns:
The deferred task from AppEngine taskqueue.
Raises:
Error: if requested bootstrap method is not allowed or does not exist.
"""
logging.debug('Running %s as a task.', function_name)
function = all_functions_list.get(function_name)
if function is None:
raise Error(
'Requested bootstrap method {} does not exist.'.format(function_name))
if not kwargs:
kwargs = {}
kwargs['user_email'] = user.get_user_email()
return deferred.defer(function, **kwargs)
def run_bootstrap(requested_tasks=None):
"""Runs one or more bootstrap functions.
Args:
requested_tasks: dict, wherein the keys are function names and the
values are keyword arg dicts. If no functions are passed, runs all
necessary bootstrap functions with no specific kwargs.
Returns:
A dictionary of started tasks, with the task names as keys and the values
being task descriptions as found in _TASK_DESCRIPTIONS.
"""
config_model.Config.set('bootstrap_started', True)
bootstrap_functions = get_bootstrap_functions()
if _is_new_deployment():
logging.info('Running bootstrap for a new deployment.')
else:
logging.info(
'Running bootstrap for an update from version %s to %s.',
config_model.Config.get('running_version'),
constants.APP_VERSION)
run_status_dict = {}
if requested_tasks:
for function_name, kwargs in requested_tasks.iteritems():
_run_function_as_task(bootstrap_functions, function_name, kwargs)
run_status_dict[function_name] = _TASK_DESCRIPTIONS.get(
function_name, function_name)
else:
logging.debug('Running all functions as no specific function was passed.')
for function_name in bootstrap_functions:
_run_function_as_task(bootstrap_functions, function_name)
run_status_dict[function_name] = _TASK_DESCRIPTIONS.get(
function_name, function_name)
return run_status_dict
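# Illustrative note (not part of the original module): run_bootstrap() accepts an
# optional mapping of task names to keyword-arg dicts, for example
#
#   run_bootstrap({'bootstrap_bq_history': {}})  # queue one specific task
#   run_bootstrap()                              # queue every outstanding task
#
# Either call returns a dict mapping each started task name to its description.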
def _is_new_deployment():
"""Checks whether this is a new deployment.
A '0.0' version number and a missing bootstrap_datastore_yaml task
status indicates that this is a new deployment. The latter check
is to support backward-compatibility with early alpha versions that did not
have a version number.
Returns:
True if this is a new deployment, else False.
"""
return (config_model.Config.get('running_version') == '0.0' and
not bootstrap_status_model.BootstrapStatus.get_by_id(
'bootstrap_datastore_yaml'))
def _is_latest_version():
"""Checks if the app is up to date and sets bootstrap to incomplete if not.
Checks whether the running version is the same as the deployed version; an
app that is not up to date should trigger bootstrap moving back to an
incomplete state, thus signaling that certain tasks need to be run again.
Returns:
True if running matches deployed version and not a new install, else False.
"""
if _is_new_deployment():
return False
up_to_date = version.LooseVersion(
constants.APP_VERSION) == version.LooseVersion(
config_model.Config.get('running_version'))
if not up_to_date and not is_bootstrap_started():
# Set the updates tasks to incomplete so that they run again.
config_model.Config.set('bootstrap_completed', False)
for task in _BOOTSTRAP_UPDATE_TASKS:
status_entity = bootstrap_status_model.BootstrapStatus.get_or_insert(task)
status_entity.success = False
status_entity.put()
return up_to_date
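# Note added for illustration: distutils' LooseVersion compares dotted version
# strings component-wise rather than lexically, for example
#
#   version.LooseVersion('1.10') > version.LooseVersion('1.9')   # True
#   '1.10' > '1.9'                                               # False (plain strings)
#
# which is what makes the ordering check in is_update() below reliable.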
def is_update():
"""Checks whether the application is in a state requiring an update.
Returns:
True if an update is available and this is not a new installation.
"""
if _is_new_deployment():
return False
return version.LooseVersion(constants.APP_VERSION) > version.LooseVersion(
config_model.Config.get('running_version'))
def is_bootstrap_completed():
"""Gets the general status of the app bootstrap.
Ensures that the latest version is running and that bootstrap has completed.
Returns:
True if the bootstrap is complete, else False.
"""
return (_is_latest_version() and
config_model.Config.get('bootstrap_completed'))
def is_bootstrap_started():
"""Checks to see if bootstrap has started.
Returns:
True if the bootstrap has started, else False.
"""
if (config_model.Config.get('bootstrap_started') and
config_model.Config.get('bootstrap_completed')):
# If bootstrap was completed indicate that it is no longer in progress.
config_model.Config.set('bootstrap_started', False)
return config_model.Config.get('bootstrap_started')
def get_bootstrap_task_status():
"""Gets the status of the bootstrap tasks.
Additionally, this sets the overall completion status if the tasks were
successful and sets the running version number after bootstrap completion.
Returns:
Dictionary with task names as the keys and values being sub-dictionaries
containing data derived from the datastore entities. If there is no data
for any given task, its place is held by an empty dict.
"""
bootstrap_completed = True
bootstrap_task_status = {}
for function_name in get_bootstrap_functions(get_all=True):
status_entity = bootstrap_status_model.BootstrapStatus.get_by_id(
function_name)
if status_entity:
bootstrap_task_status[function_name] = status_entity.to_dict()
else:
bootstrap_task_status[function_name] = {}
if not bootstrap_task_status[function_name].get('success'):
bootstrap_completed = False
if bootstrap_completed:
config_model.Config.set(
'running_version', constants.APP_VERSION)
logging.info(
'Successfully bootstrapped application to version %s.',
constants.APP_VERSION)
config_model.Config.set('bootstrap_completed', bootstrap_completed)
return bootstrap_task_status<|fim▁end|> | |
<|file_name|>test_relationship.py<|end_file_name|><|fim▁begin|>from sqlalchemy.orm import create_session, relationship, mapper, \
contains_eager, joinedload, subqueryload, subqueryload_all,\
Session, aliased, with_polymorphic
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.engine import default
from sqlalchemy.testing import AssertsCompiledSQL, fixtures
from sqlalchemy import testing
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.testing import assert_raises, eq_, is_
class Company(fixtures.ComparableEntity):
pass
class Person(fixtures.ComparableEntity):
pass
class Engineer(Person):
pass
class Manager(Person):
pass
class Boss(Manager):
pass
class Machine(fixtures.ComparableEntity):
pass
class Paperwork(fixtures.ComparableEntity):
pass
class SelfReferentialTestJoinedToBase(fixtures.MappedTest):
run_setup_mappers = 'once'
@classmethod
def define_tables(cls, metadata):
Table('people', metadata,
Column('person_id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(30)))
Table('engineers', metadata,
Column('person_id', Integer,
ForeignKey('people.person_id'),
primary_key=True),
Column('primary_language', String(50)),
Column('reports_to_id', Integer,
ForeignKey('people.person_id')))
@classmethod
def setup_mappers(cls):
engineers, people = cls.tables.engineers, cls.tables.people
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person')
mapper(Engineer, engineers,
inherits=Person,
inherit_condition=engineers.c.person_id == people.c.person_id,
polymorphic_identity='engineer',
properties={
'reports_to':relationship(
Person,
primaryjoin=
people.c.person_id == engineers.c.reports_to_id)})
def test_has(self):
p1 = Person(name='dogbert')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=p1)
sess = create_session()
sess.add(p1)
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Engineer)
.filter(Engineer.reports_to.has(Person.name == 'dogbert'))
.first(),
Engineer(name='dilbert'))
def test_oftype_aliases_in_exists(self):
e1 = Engineer(name='dilbert', primary_language='java')
e2 = Engineer(name='wally', primary_language='c++', reports_to=e1)
sess = create_session()
sess.add_all([e1, e2])
sess.flush()
eq_(sess.query(Engineer)
.filter(Engineer.reports_to
.of_type(Engineer)
.has(Engineer.name == 'dilbert'))
.first(),
e2)
def test_join(self):
p1 = Person(name='dogbert')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=p1)
sess = create_session()
sess.add(p1)
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Engineer)
.join('reports_to', aliased=True)
.filter(Person.name == 'dogbert').first(),
Engineer(name='dilbert'))
class SelfReferentialJ2JTest(fixtures.MappedTest):
run_setup_mappers = 'once'
@classmethod
def define_tables(cls, metadata):
people = Table('people', metadata,
Column('person_id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(30)))
engineers = Table('engineers', metadata,
Column('person_id', Integer,
ForeignKey('people.person_id'),
primary_key=True),
Column('primary_language', String(50)),
Column('reports_to_id', Integer,
ForeignKey('managers.person_id'))
)
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
)
@classmethod
def setup_mappers(cls):
engineers = cls.tables.engineers
managers = cls.tables.managers
people = cls.tables.people
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person')
mapper(Manager, managers,
inherits=Person,
polymorphic_identity='manager')
mapper(Engineer, engineers,
inherits=Person,
polymorphic_identity='engineer',
properties={
'reports_to':relationship(
Manager,
primaryjoin=
managers.c.person_id == engineers.c.reports_to_id,
backref='engineers')})
def test_has(self):
m1 = Manager(name='dogbert')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1)
sess = create_session()
sess.add(m1)
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Engineer)
.filter(Engineer.reports_to.has(Manager.name == 'dogbert'))
.first(),
Engineer(name='dilbert'))
def test_join(self):
m1 = Manager(name='dogbert')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1)
sess = create_session()
sess.add(m1)
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Engineer)
.join('reports_to', aliased=True)
.filter(Manager.name == 'dogbert').first(),
Engineer(name='dilbert'))
def test_filter_aliasing(self):
m1 = Manager(name='dogbert')
m2 = Manager(name='foo')
e1 = Engineer(name='wally', primary_language='java', reports_to=m1)
e2 = Engineer(name='dilbert', primary_language='c++', reports_to=m2)
e3 = Engineer(name='etc', primary_language='c++')
sess = create_session()
sess.add_all([m1, m2, e1, e2, e3])
sess.flush()
sess.expunge_all()
# filter aliasing applied to Engineer doesn't whack Manager
eq_(sess.query(Manager)
.join(Manager.engineers)
.filter(Manager.name == 'dogbert').all(),
[m1])
eq_(sess.query(Manager)
.join(Manager.engineers)
.filter(Engineer.name == 'dilbert').all(),
[m2])
eq_(sess.query(Manager, Engineer)
.join(Manager.engineers)
.order_by(Manager.name.desc()).all(),
[(m2, e2), (m1, e1)])
def test_relationship_compare(self):
m1 = Manager(name='dogbert')
m2 = Manager(name='foo')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1)
e2 = Engineer(name='wally', primary_language='c++', reports_to=m2)
e3 = Engineer(name='etc', primary_language='c++')
sess = create_session()
sess.add(m1)
sess.add(m2)
sess.add(e1)
sess.add(e2)
sess.add(e3)
sess.flush()
sess.expunge_all()
eq_(sess.query(Manager)
.join(Manager.engineers)
.filter(Engineer.reports_to == None).all(),
[])
eq_(sess.query(Manager)
.join(Manager.engineers)
.filter(Engineer.reports_to == m1).all(),
[m1])
class SelfReferentialJ2JSelfTest(fixtures.MappedTest):
run_setup_mappers = 'once'
@classmethod
def define_tables(cls, metadata):
people = Table('people', metadata,
Column('person_id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(30)))
engineers = Table('engineers', metadata,
Column('person_id', Integer,
ForeignKey('people.person_id'),
primary_key=True),
Column('reports_to_id', Integer,
ForeignKey('engineers.person_id')))
@classmethod
def setup_mappers(cls):
engineers = cls.tables.engineers
people = cls.tables.people
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person')
mapper(Engineer, engineers,
inherits=Person,
polymorphic_identity='engineer',
properties={
'reports_to':relationship(
Engineer,
primaryjoin=
engineers.c.person_id == engineers.c.reports_to_id,
backref='engineers',
remote_side=engineers.c.person_id)})
def _two_obj_fixture(self):
e1 = Engineer(name='wally')
e2 = Engineer(name='dilbert', reports_to=e1)
sess = Session()
sess.add_all([e1, e2])
sess.commit()
return sess
def _five_obj_fixture(self):
sess = Session()
e1, e2, e3, e4, e5 = [
Engineer(name='e%d' % (i + 1)) for i in range(5)
]
e3.reports_to = e1
e4.reports_to = e2
sess.add_all([e1, e2, e3, e4, e5])
sess.commit()
return sess
def test_has(self):
sess = self._two_obj_fixture()
eq_(sess.query(Engineer)
.filter(Engineer.reports_to.has(Engineer.name == 'wally'))
.first(),
Engineer(name='dilbert'))
def test_join_explicit_alias(self):
sess = self._five_obj_fixture()
ea = aliased(Engineer)
eq_(sess.query(Engineer)
.join(ea, Engineer.engineers)
.filter(Engineer.name == 'e1').all(),
[Engineer(name='e1')])
def test_join_aliased_flag_one(self):
sess = self._two_obj_fixture()
eq_(sess.query(Engineer)
.join('reports_to', aliased=True)
.filter(Engineer.name == 'wally').first(),
Engineer(name='dilbert'))
def test_join_aliased_flag_two(self):
sess = self._five_obj_fixture()
eq_(sess.query(Engineer)
.join(Engineer.engineers, aliased=True)
.filter(Engineer.name == 'e4').all(),
[Engineer(name='e2')])
def test_relationship_compare(self):
sess = self._five_obj_fixture()
e1 = sess.query(Engineer).filter_by(name='e1').one()
eq_(sess.query(Engineer)
.join(Engineer.engineers, aliased=True)
.filter(Engineer.reports_to == None).all(),
[])
eq_(sess.query(Engineer)
.join(Engineer.engineers, aliased=True)
.filter(Engineer.reports_to == e1).all(),
[e1])
class M2MFilterTest(fixtures.MappedTest):
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
organizations = Table('organizations', metadata,
Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)))<|fim▁hole|> ForeignKey('organizations.id')),
Column('engineer_id', Integer,
ForeignKey('engineers.person_id')))
people = Table('people', metadata,
Column('person_id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(30)))
engineers = Table('engineers', metadata,
Column('person_id', Integer,
ForeignKey('people.person_id'),
primary_key=True),
Column('primary_language', String(50)))
@classmethod
def setup_mappers(cls):
organizations = cls.tables.organizations
people = cls.tables.people
engineers = cls.tables.engineers
engineers_to_org = cls.tables.engineers_to_org
class Organization(cls.Comparable):
pass
mapper(Organization, organizations,
properties={
'engineers':relationship(
Engineer,
secondary=engineers_to_org,
backref='organizations')})
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person')
mapper(Engineer, engineers,
inherits=Person,
polymorphic_identity='engineer')
@classmethod
def insert_data(cls):
Organization = cls.classes.Organization
e1 = Engineer(name='e1')
e2 = Engineer(name='e2')
e3 = Engineer(name='e3')
e4 = Engineer(name='e4')
org1 = Organization(name='org1', engineers=[e1, e2])
org2 = Organization(name='org2', engineers=[e3, e4])
sess = create_session()
sess.add(org1)
sess.add(org2)
sess.flush()
def test_not_contains(self):
Organization = self.classes.Organization
sess = create_session()
e1 = sess.query(Person).filter(Engineer.name == 'e1').one()
eq_(sess.query(Organization)
.filter(~Organization.engineers
.of_type(Engineer)
.contains(e1))
.all(),
[Organization(name='org2')])
# this had a bug
eq_(sess.query(Organization)
.filter(~Organization.engineers
.contains(e1))
.all(),
[Organization(name='org2')])
def test_any(self):
sess = create_session()
Organization = self.classes.Organization
eq_(sess.query(Organization)
.filter(Organization.engineers
.of_type(Engineer)
.any(Engineer.name == 'e1'))
.all(),
[Organization(name='org1')])
eq_(sess.query(Organization)
.filter(Organization.engineers
.any(Engineer.name == 'e1'))
.all(),
[Organization(name='org1')])
class SelfReferentialM2MTest(fixtures.MappedTest, AssertsCompiledSQL):
__dialect__ = "default"
@classmethod
def define_tables(cls, metadata):
Table('secondary', metadata,
Column('left_id', Integer,
ForeignKey('parent.id'),
nullable=False),
Column('right_id', Integer,
ForeignKey('parent.id'),
nullable=False))
Table('parent', metadata,
Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('cls', String(50)))
Table('child1', metadata,
Column('id', Integer,
ForeignKey('parent.id'),
primary_key=True))
Table('child2', metadata,
Column('id', Integer,
ForeignKey('parent.id'),
primary_key=True))
@classmethod
def setup_classes(cls):
class Parent(cls.Basic):
pass
class Child1(Parent):
pass
class Child2(Parent):
pass
@classmethod
def setup_mappers(cls):
child1 = cls.tables.child1
child2 = cls.tables.child2
Parent = cls.classes.Parent
parent = cls.tables.parent
Child1 = cls.classes.Child1
Child2 = cls.classes.Child2
secondary = cls.tables.secondary
mapper(Parent, parent,
polymorphic_on=parent.c.cls)
mapper(Child1, child1,
inherits=Parent,
polymorphic_identity='child1',
properties={
'left_child2':relationship(
Child2,
secondary=secondary,
primaryjoin=parent.c.id == secondary.c.right_id,
secondaryjoin=parent.c.id == secondary.c.left_id,
uselist=False,
backref="right_children")})
mapper(Child2, child2,
inherits=Parent,
polymorphic_identity='child2')
def test_query_crit(self):
Child1, Child2 = self.classes.Child1, self.classes.Child2
sess = create_session()
c11, c12, c13 = Child1(), Child1(), Child1()
c21, c22, c23 = Child2(), Child2(), Child2()
c11.left_child2 = c22
c12.left_child2 = c22
c13.left_child2 = c23
sess.add_all([c11, c12, c13, c21, c22, c23])
sess.flush()
# test that the join to Child2 doesn't alias Child1 in the select
eq_(set(sess.query(Child1).join(Child1.left_child2)),
set([c11, c12, c13]))
eq_(set(sess.query(Child1, Child2).join(Child1.left_child2)),
set([(c11, c22), (c12, c22), (c13, c23)]))
# test __eq__() on property is annotating correctly
eq_(set(sess.query(Child2)
.join(Child2.right_children)
.filter(Child1.left_child2 == c22)),
set([c22]))
# test the same again
self.assert_compile(
sess.query(Child2)
.join(Child2.right_children)
.filter(Child1.left_child2 == c22)
.with_labels().statement,
"SELECT child2.id AS child2_id, parent.id AS parent_id, "
"parent.cls AS parent_cls FROM secondary AS secondary_1, "
"parent JOIN child2 ON parent.id = child2.id JOIN secondary AS "
"secondary_2 ON parent.id = secondary_2.left_id JOIN "
"(parent AS parent_1 JOIN child1 AS child1_1 ON parent_1.id = child1_1.id) "
"ON parent_1.id = secondary_2.right_id WHERE "
"parent_1.id = secondary_1.right_id AND :param_1 = "
"secondary_1.left_id"
)
def test_eager_join(self):
Child1, Child2 = self.classes.Child1, self.classes.Child2
sess = create_session()
c1 = Child1()
c1.left_child2 = Child2()
sess.add(c1)
sess.flush()
# test that the splicing of the join works here, doesn't break in
# the middle of "parent join child1"
q = sess.query(Child1).options(joinedload('left_child2'))
self.assert_compile(q.limit(1).with_labels().statement,
"SELECT anon_1.child1_id AS anon_1_child1_id, anon_1.parent_id "
"AS anon_1_parent_id, anon_1.parent_cls AS anon_1_parent_cls, "
"child2_1.id AS child2_1_id, parent_1.id AS "
"parent_1_id, parent_1.cls AS parent_1_cls FROM "
"(SELECT child1.id AS child1_id, parent.id AS parent_id, "
"parent.cls AS parent_cls "
"FROM parent JOIN child1 ON parent.id = child1.id "
"LIMIT :param_1) AS anon_1 LEFT OUTER JOIN "
"(secondary AS secondary_1 JOIN "
"(parent AS parent_1 JOIN child2 AS child2_1 "
"ON parent_1.id = child2_1.id) ON parent_1.id = secondary_1.left_id) "
"ON anon_1.parent_id = secondary_1.right_id",
{'param_1':1})
# another way to check
assert q.limit(1).with_labels().subquery().count().scalar() == 1
assert q.first() is c1
def test_subquery_load(self):
Child1, Child2 = self.classes.Child1, self.classes.Child2
sess = create_session()
c1 = Child1()
c1.left_child2 = Child2()
sess.add(c1)
sess.flush()
sess.expunge_all()
query_ = sess.query(Child1).options(subqueryload('left_child2'))
for row in query_.all():
assert row.left_child2
class EagerToSubclassTest(fixtures.MappedTest):
"""Test eager loads to subclass mappers"""
run_setup_classes = 'once'
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table('parent', metadata,
Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('data', String(10)))
Table('base', metadata,
Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('type', String(10)),
Column('related_id', Integer,
ForeignKey('related.id')))
Table('sub', metadata,
Column('id', Integer,
ForeignKey('base.id'),
primary_key=True),
Column('data', String(10)),
Column('parent_id', Integer,
ForeignKey('parent.id'),
nullable=False))
Table('related', metadata,
Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('data', String(10)))
@classmethod
def setup_classes(cls):
class Parent(cls.Comparable):
pass
class Base(cls.Comparable):
pass
class Sub(Base):
pass
class Related(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
sub = cls.tables.sub
Sub = cls.classes.Sub
base = cls.tables.base
Base = cls.classes.Base
parent = cls.tables.parent
Parent = cls.classes.Parent
related = cls.tables.related
Related = cls.classes.Related
mapper(Parent, parent,
properties={'children':relationship(Sub, order_by=sub.c.data)})
mapper(Base, base,
polymorphic_on=base.c.type,
polymorphic_identity='b',
properties={'related':relationship(Related)})
mapper(Sub, sub,
inherits=Base,
polymorphic_identity='s')
mapper(Related, related)
@classmethod
def insert_data(cls):
global p1, p2
Parent = cls.classes.Parent
Sub = cls.classes.Sub
Related = cls.classes.Related
sess = Session()
r1, r2 = Related(data='r1'), Related(data='r2')
s1 = Sub(data='s1', related=r1)
s2 = Sub(data='s2', related=r2)
s3 = Sub(data='s3')
s4 = Sub(data='s4', related=r2)
s5 = Sub(data='s5')
p1 = Parent(data='p1', children=[s1, s2, s3])
p2 = Parent(data='p2', children=[s4, s5])
sess.add(p1)
sess.add(p2)
sess.commit()
def test_joinedload(self):
Parent = self.classes.Parent
sess = Session()
def go():
eq_(sess.query(Parent)
.options(joinedload(Parent.children)).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager(self):
Parent = self.classes.Parent
Sub = self.classes.Sub
sess = Session()
def go():
eq_(sess.query(Parent)
.join(Parent.children)
.options(contains_eager(Parent.children))
.order_by(Parent.data, Sub.data).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 1)
def test_subq_through_related(self):
Parent = self.classes.Parent
Base = self.classes.Base
sess = Session()
def go():
eq_(sess.query(Parent)
.options(subqueryload_all(Parent.children, Base.related))
.order_by(Parent.data).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 3)
def test_subq_through_related_aliased(self):
Parent = self.classes.Parent
Base = self.classes.Base
pa = aliased(Parent)
sess = Session()
def go():
eq_(sess.query(pa)
.options(subqueryload_all(pa.children, Base.related))
.order_by(pa.data).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 3)
class SubClassEagerToSubClassTest(fixtures.MappedTest):
"""Test joinedloads from subclass to subclass mappers"""
run_setup_classes = 'once'
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table('parent', metadata,
Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('type', String(10)),
)
Table('subparent', metadata,
Column('id', Integer,
ForeignKey('parent.id'),
primary_key=True),
Column('data', String(10)),
)
Table('base', metadata,
Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('type', String(10)),
)
Table('sub', metadata,
Column('id', Integer,
ForeignKey('base.id'),
primary_key=True),
Column('data', String(10)),
Column('subparent_id', Integer,
ForeignKey('subparent.id'),
nullable=False)
)
@classmethod
def setup_classes(cls):
class Parent(cls.Comparable):
pass
class Subparent(Parent):
pass
class Base(cls.Comparable):
pass
class Sub(Base):
pass
@classmethod
def setup_mappers(cls):
sub = cls.tables.sub
Sub = cls.classes.Sub
base = cls.tables.base
Base = cls.classes.Base
parent = cls.tables.parent
Parent = cls.classes.Parent
subparent = cls.tables.subparent
Subparent = cls.classes.Subparent
mapper(Parent, parent,
polymorphic_on=parent.c.type,
polymorphic_identity='b')
mapper(Subparent, subparent,
inherits=Parent,
polymorphic_identity='s',
properties={
'children':relationship(Sub, order_by=base.c.id)})
mapper(Base, base,
polymorphic_on=base.c.type,
polymorphic_identity='b')
mapper(Sub, sub,
inherits=Base,
polymorphic_identity='s')
@classmethod
def insert_data(cls):
global p1, p2
Sub, Subparent = cls.classes.Sub, cls.classes.Subparent
sess = create_session()
p1 = Subparent(
data='p1',
children=[Sub(data='s1'), Sub(data='s2'), Sub(data='s3')])
p2 = Subparent(
data='p2',
children=[Sub(data='s4'), Sub(data='s5')])
sess.add(p1)
sess.add(p2)
sess.flush()
def test_joinedload(self):
Subparent = self.classes.Subparent
sess = create_session()
def go():
eq_(sess.query(Subparent)
.options(joinedload(Subparent.children)).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
eq_(sess.query(Subparent)
.options(joinedload("children")).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager(self):
Subparent = self.classes.Subparent
sess = create_session()
def go():
eq_(sess.query(Subparent)
.join(Subparent.children)
.options(contains_eager(Subparent.children)).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
eq_(sess.query(Subparent)
.join(Subparent.children)
.options(contains_eager("children")).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 1)
def test_subqueryload(self):
Subparent = self.classes.Subparent
sess = create_session()
def go():
eq_(sess.query(Subparent)
.options(subqueryload(Subparent.children)).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 2)
sess.expunge_all()
def go():
eq_(sess.query(Subparent)
.options(subqueryload("children")).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 2)
class SameNamedPropTwoPolymorphicSubClassesTest(fixtures.MappedTest):
"""test pathing when two subclasses contain a different property
for the same name, and polymorphic loading is used.
#2614
"""
run_setup_classes = 'once'
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table('a', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(10))
)
Table('b', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True)
)
Table('btod', metadata,
Column('bid', Integer, ForeignKey('b.id'), nullable=False),
Column('did', Integer, ForeignKey('d.id'), nullable=False)
)
Table('c', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True)
)
Table('ctod', metadata,
Column('cid', Integer, ForeignKey('c.id'), nullable=False),
Column('did', Integer, ForeignKey('d.id'), nullable=False)
)
Table('d', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
)
@classmethod
def setup_classes(cls):
class A(cls.Comparable):
pass
class B(A):
pass
class C(A):
pass
class D(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
A = cls.classes.A
B = cls.classes.B
C = cls.classes.C
D = cls.classes.D
mapper(A, cls.tables.a, polymorphic_on=cls.tables.a.c.type)
mapper(B, cls.tables.b, inherits=A, polymorphic_identity='b',
properties={
'related': relationship(D, secondary=cls.tables.btod)
})
mapper(C, cls.tables.c, inherits=A, polymorphic_identity='c',
properties={
'related': relationship(D, secondary=cls.tables.ctod)
})
mapper(D, cls.tables.d)
@classmethod
def insert_data(cls):
B = cls.classes.B
C = cls.classes.C
D = cls.classes.D
session = Session()
d = D()
session.add_all([
B(related=[d]),
C(related=[d])
])
session.commit()
def test_free_w_poly_subquery(self):
A = self.classes.A
B = self.classes.B
C = self.classes.C
D = self.classes.D
session = Session()
d = session.query(D).one()
a_poly = with_polymorphic(A, [B, C])
def go():
for a in session.query(a_poly).\
options(
subqueryload(a_poly.B.related),
subqueryload(a_poly.C.related)):
eq_(a.related, [d])
self.assert_sql_count(testing.db, go, 3)
def test_fixed_w_poly_subquery(self):
A = self.classes.A
B = self.classes.B
C = self.classes.C
D = self.classes.D
session = Session()
d = session.query(D).one()
def go():
for a in session.query(A).with_polymorphic([B, C]).\
options(subqueryload(B.related), subqueryload(C.related)):
eq_(a.related, [d])
self.assert_sql_count(testing.db, go, 3)
def test_free_w_poly_joined(self):
A = self.classes.A
B = self.classes.B
C = self.classes.C
D = self.classes.D
session = Session()
d = session.query(D).one()
a_poly = with_polymorphic(A, [B, C])
def go():
for a in session.query(a_poly).\
options(
joinedload(a_poly.B.related),
joinedload(a_poly.C.related)):
eq_(a.related, [d])
self.assert_sql_count(testing.db, go, 1)
def test_fixed_w_poly_joined(self):
A = self.classes.A
B = self.classes.B
C = self.classes.C
D = self.classes.D
session = Session()
d = session.query(D).one()
def go():
for a in session.query(A).with_polymorphic([B, C]).\
options(joinedload(B.related), joinedload(C.related)):
eq_(a.related, [d])
self.assert_sql_count(testing.db, go, 1)
class SubClassToSubClassFromParentTest(fixtures.MappedTest):
"""test #2617
"""
run_setup_classes = 'once'
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table('z', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
)
Table('a', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(10)),
Column('z_id', Integer, ForeignKey('z.id'))
)
Table('b', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True)
)
Table('d', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True),
Column('b_id', Integer, ForeignKey('b.id'))
)
@classmethod
def setup_classes(cls):
class Z(cls.Comparable):
pass
class A(cls.Comparable):
pass
class B(A):
pass
class D(A):
pass
@classmethod
def setup_mappers(cls):
Z = cls.classes.Z
A = cls.classes.A
B = cls.classes.B
D = cls.classes.D
mapper(Z, cls.tables.z)
mapper(A, cls.tables.a, polymorphic_on=cls.tables.a.c.type,
with_polymorphic='*',
properties={
'zs': relationship(Z, lazy="subquery")
})
mapper(B, cls.tables.b, inherits=A, polymorphic_identity='b',
properties={
'related': relationship(D, lazy="subquery",
primaryjoin=cls.tables.d.c.b_id ==
cls.tables.b.c.id)
})
mapper(D, cls.tables.d, inherits=A, polymorphic_identity='d')
@classmethod
def insert_data(cls):
B = cls.classes.B
session = Session()
session.add(B())
session.commit()
def test_2617(self):
A = self.classes.A
session = Session()
def go():
a1 = session.query(A).first()
eq_(a1.related, [])
self.assert_sql_count(testing.db, go, 3)
class SubClassToSubClassMultiTest(AssertsCompiledSQL, fixtures.MappedTest):
"""
Two different joined-inh subclasses, led by a
parent, with two distinct endpoints:
parent -> subcl1 -> subcl2 -> (ep1, ep2)
the join to ep2 indicates we need to join
from the middle of the joinpoint, skipping ep1
"""
run_create_tables = None
run_deletes = None
__dialect__ = 'default'
@classmethod
def define_tables(cls, metadata):
Table('parent', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(30))
)
Table('base1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(30))
)
Table('sub1', metadata,
Column('id', Integer, ForeignKey('base1.id'), primary_key=True),
Column('parent_id', ForeignKey('parent.id')),
Column('subdata', String(30))
)
Table('base2', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('base1_id', ForeignKey('base1.id')),
Column('data', String(30))
)
Table('sub2', metadata,
Column('id', Integer, ForeignKey('base2.id'), primary_key=True),
Column('subdata', String(30))
)
Table('ep1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('base2_id', Integer, ForeignKey('base2.id')),
Column('data', String(30))
)
Table('ep2', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('base2_id', Integer, ForeignKey('base2.id')),
Column('data', String(30))
)
@classmethod
def setup_classes(cls):
class Parent(cls.Comparable):
pass
class Base1(cls.Comparable):
pass
class Sub1(Base1):
pass
class Base2(cls.Comparable):
pass
class Sub2(Base2):
pass
class EP1(cls.Comparable):
pass
class EP2(cls.Comparable):
pass
@classmethod
def _classes(cls):
return cls.classes.Parent, cls.classes.Base1,\
cls.classes.Base2, cls.classes.Sub1,\
cls.classes.Sub2, cls.classes.EP1,\
cls.classes.EP2
@classmethod
def setup_mappers(cls):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = cls._classes()
mapper(Parent, cls.tables.parent, properties={
'sub1': relationship(Sub1)
})
mapper(Base1, cls.tables.base1, properties={
'sub2': relationship(Sub2)
})
mapper(Sub1, cls.tables.sub1, inherits=Base1)
mapper(Base2, cls.tables.base2, properties={
'ep1': relationship(EP1),
'ep2': relationship(EP2)
})
mapper(Sub2, cls.tables.sub2, inherits=Base2)
mapper(EP1, cls.tables.ep1)
mapper(EP2, cls.tables.ep2)
def test_one(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = Session()
self.assert_compile(
s.query(Parent).join(Parent.sub1, Sub1.sub2).
join(Sub2.ep1).
join(Sub2.ep2),
"SELECT parent.id AS parent_id, parent.data AS parent_data "
"FROM parent JOIN (base1 JOIN sub1 ON base1.id = sub1.id) "
"ON parent.id = sub1.parent_id JOIN "
"(base2 JOIN sub2 "
"ON base2.id = sub2.id) "
"ON base1.id = base2.base1_id "
"JOIN ep1 ON base2.id = ep1.base2_id "
"JOIN ep2 ON base2.id = ep2.base2_id"
)
def test_two(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s2a = aliased(Sub2, flat=True)
s = Session()
self.assert_compile(
s.query(Parent).join(Parent.sub1).
join(s2a, Sub1.sub2),
"SELECT parent.id AS parent_id, parent.data AS parent_data "
"FROM parent JOIN (base1 JOIN sub1 ON base1.id = sub1.id) "
"ON parent.id = sub1.parent_id JOIN "
"(base2 AS base2_1 JOIN sub2 AS sub2_1 "
"ON base2_1.id = sub2_1.id) "
"ON base1.id = base2_1.base1_id"
)
def test_three(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = Session()
self.assert_compile(
s.query(Base1).join(Base1.sub2).
join(Sub2.ep1).\
join(Sub2.ep2),
"SELECT base1.id AS base1_id, base1.data AS base1_data "
"FROM base1 JOIN (base2 JOIN sub2 "
"ON base2.id = sub2.id) ON base1.id = "
"base2.base1_id "
"JOIN ep1 ON base2.id = ep1.base2_id "
"JOIN ep2 ON base2.id = ep2.base2_id"
)
def test_four(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = Session()
self.assert_compile(
s.query(Sub2).join(Base1, Base1.id == Sub2.base1_id).
join(Sub2.ep1).\
join(Sub2.ep2),
"SELECT sub2.id AS sub2_id, base2.id AS base2_id, "
"base2.base1_id AS base2_base1_id, base2.data AS base2_data, "
"sub2.subdata AS sub2_subdata "
"FROM base2 JOIN sub2 ON base2.id = sub2.id "
"JOIN base1 ON base1.id = base2.base1_id "
"JOIN ep1 ON base2.id = ep1.base2_id "
"JOIN ep2 ON base2.id = ep2.base2_id"
)
def test_five(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = Session()
self.assert_compile(
s.query(Sub2).join(Sub1, Sub1.id == Sub2.base1_id).
join(Sub2.ep1).\
join(Sub2.ep2),
"SELECT sub2.id AS sub2_id, base2.id AS base2_id, "
"base2.base1_id AS base2_base1_id, base2.data AS base2_data, "
"sub2.subdata AS sub2_subdata "
"FROM base2 JOIN sub2 ON base2.id = sub2.id "
"JOIN "
"(base1 JOIN sub1 ON base1.id = sub1.id) "
"ON sub1.id = base2.base1_id "
"JOIN ep1 ON base2.id = ep1.base2_id "
"JOIN ep2 ON base2.id = ep2.base2_id"
)
def test_six(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = Session()
self.assert_compile(
s.query(Sub2).from_self().\
join(Sub2.ep1).
join(Sub2.ep2),
"SELECT anon_1.sub2_id AS anon_1_sub2_id, "
"anon_1.base2_id AS anon_1_base2_id, "
"anon_1.base2_base1_id AS anon_1_base2_base1_id, "
"anon_1.base2_data AS anon_1_base2_data, "
"anon_1.sub2_subdata AS anon_1_sub2_subdata "
"FROM (SELECT sub2.id AS sub2_id, base2.id AS base2_id, "
"base2.base1_id AS base2_base1_id, base2.data AS base2_data, "
"sub2.subdata AS sub2_subdata "
"FROM base2 JOIN sub2 ON base2.id = sub2.id) AS anon_1 "
"JOIN ep1 ON anon_1.base2_id = ep1.base2_id "
"JOIN ep2 ON anon_1.base2_id = ep2.base2_id"
)
def test_seven(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = Session()
self.assert_compile(
# adding Sub2 to the entities list helps it,
# otherwise the joins for Sub2.ep1/ep2 don't have columns
# to latch onto. Can't really make it better than this
s.query(Parent, Sub2).join(Parent.sub1).\
join(Sub1.sub2).from_self().\
join(Sub2.ep1).
join(Sub2.ep2),
"SELECT anon_1.parent_id AS anon_1_parent_id, "
"anon_1.parent_data AS anon_1_parent_data, "
"anon_1.sub2_id AS anon_1_sub2_id, "
"anon_1.base2_id AS anon_1_base2_id, "
"anon_1.base2_base1_id AS anon_1_base2_base1_id, "
"anon_1.base2_data AS anon_1_base2_data, "
"anon_1.sub2_subdata AS anon_1_sub2_subdata "
"FROM (SELECT parent.id AS parent_id, parent.data AS parent_data, "
"sub2.id AS sub2_id, "
"base2.id AS base2_id, "
"base2.base1_id AS base2_base1_id, "
"base2.data AS base2_data, "
"sub2.subdata AS sub2_subdata "
"FROM parent JOIN (base1 JOIN sub1 ON base1.id = sub1.id) "
"ON parent.id = sub1.parent_id JOIN "
"(base2 JOIN sub2 ON base2.id = sub2.id) "
"ON base1.id = base2.base1_id) AS anon_1 "
"JOIN ep1 ON anon_1.base2_id = ep1.base2_id "
"JOIN ep2 ON anon_1.base2_id = ep2.base2_id"
)
class JoinAcrossJoinedInhMultiPath(fixtures.DeclarativeMappedTest,
testing.AssertsCompiledSQL):
"""test long join paths with a joined-inh in the middle, where we go multiple
times across the same joined-inh to the same target but with other classes
in the middle. E.g. test [ticket:2908]
"""
run_setup_mappers = 'once'
__dialect__ = 'default'
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Root(Base):
__tablename__ = 'root'
id = Column(Integer, primary_key=True)
sub1_id = Column(Integer, ForeignKey('sub1.id'))
intermediate = relationship("Intermediate")
sub1 = relationship("Sub1")
class Intermediate(Base):
__tablename__ = 'intermediate'
id = Column(Integer, primary_key=True)
sub1_id = Column(Integer, ForeignKey('sub1.id'))
root_id = Column(Integer, ForeignKey('root.id'))
sub1 = relationship("Sub1")
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
class Sub1(Parent):
__tablename__ = 'sub1'
id = Column(Integer, ForeignKey('parent.id'),
primary_key=True)
target = relationship("Target")
class Target(Base):
__tablename__ = 'target'
id = Column(Integer, primary_key=True)
sub1_id = Column(Integer, ForeignKey('sub1.id'))
def test_join(self):
Root, Intermediate, Sub1, Target = \
self.classes.Root, self.classes.Intermediate, \
self.classes.Sub1, self.classes.Target
s1_alias = aliased(Sub1)
s2_alias = aliased(Sub1)
t1_alias = aliased(Target)
t2_alias = aliased(Target)
sess = Session()
q = sess.query(Root).\
join(s1_alias, Root.sub1).join(t1_alias, s1_alias.target).\
join(Root.intermediate).join(s2_alias, Intermediate.sub1).\
join(t2_alias, s2_alias.target)
self.assert_compile(q,
"SELECT root.id AS root_id, root.sub1_id AS root_sub1_id "
"FROM root "
"JOIN (SELECT parent.id AS parent_id, sub1.id AS sub1_id "
"FROM parent JOIN sub1 ON parent.id = sub1.id) AS anon_1 "
"ON anon_1.sub1_id = root.sub1_id "
"JOIN target AS target_1 ON anon_1.sub1_id = target_1.sub1_id "
"JOIN intermediate ON root.id = intermediate.root_id "
"JOIN (SELECT parent.id AS parent_id, sub1.id AS sub1_id "
"FROM parent JOIN sub1 ON parent.id = sub1.id) AS anon_2 "
"ON anon_2.sub1_id = intermediate.sub1_id "
"JOIN target AS target_2 ON anon_2.sub1_id = target_2.sub1_id")
def test_join_flat(self):
Root, Intermediate, Sub1, Target = \
self.classes.Root, self.classes.Intermediate, \
self.classes.Sub1, self.classes.Target
s1_alias = aliased(Sub1, flat=True)
s2_alias = aliased(Sub1, flat=True)
t1_alias = aliased(Target)
t2_alias = aliased(Target)
sess = Session()
q = sess.query(Root).\
join(s1_alias, Root.sub1).join(t1_alias, s1_alias.target).\
join(Root.intermediate).join(s2_alias, Intermediate.sub1).\
join(t2_alias, s2_alias.target)
self.assert_compile(q,
"SELECT root.id AS root_id, root.sub1_id AS root_sub1_id "
"FROM root "
"JOIN (parent AS parent_1 JOIN sub1 AS sub1_1 ON parent_1.id = sub1_1.id) "
"ON sub1_1.id = root.sub1_id "
"JOIN target AS target_1 ON sub1_1.id = target_1.sub1_id "
"JOIN intermediate ON root.id = intermediate.root_id "
"JOIN (parent AS parent_2 JOIN sub1 AS sub1_2 ON parent_2.id = sub1_2.id) "
"ON sub1_2.id = intermediate.sub1_id "
"JOIN target AS target_2 ON sub1_2.id = target_2.sub1_id"
)
def test_joinedload(self):
Root, Intermediate, Sub1, Target = \
self.classes.Root, self.classes.Intermediate, \
self.classes.Sub1, self.classes.Target
sess = Session()
q = sess.query(Root).\
options(
joinedload(Root.sub1).joinedload(Sub1.target),
joinedload(Root.intermediate).joinedload(Intermediate.sub1).\
joinedload(Sub1.target),
)
self.assert_compile(q,
"SELECT root.id AS root_id, root.sub1_id AS root_sub1_id, "
"target_1.id AS target_1_id, target_1.sub1_id AS target_1_sub1_id, "
"sub1_1.id AS sub1_1_id, parent_1.id AS parent_1_id, "
"intermediate_1.id AS intermediate_1_id, "
"intermediate_1.sub1_id AS intermediate_1_sub1_id, "
"intermediate_1.root_id AS intermediate_1_root_id, "
"target_2.id AS target_2_id, target_2.sub1_id AS target_2_sub1_id, "
"sub1_2.id AS sub1_2_id, parent_2.id AS parent_2_id "
"FROM root "
"LEFT OUTER JOIN intermediate AS intermediate_1 "
"ON root.id = intermediate_1.root_id "
"LEFT OUTER JOIN (parent AS parent_1 JOIN sub1 AS sub1_1 "
"ON parent_1.id = sub1_1.id) ON sub1_1.id = intermediate_1.sub1_id "
"LEFT OUTER JOIN target AS target_1 ON sub1_1.id = target_1.sub1_id "
"LEFT OUTER JOIN (parent AS parent_2 JOIN sub1 AS sub1_2 "
"ON parent_2.id = sub1_2.id) ON sub1_2.id = root.sub1_id "
"LEFT OUTER JOIN target AS target_2 ON sub1_2.id = target_2.sub1_id")
class MultipleAdaptUsesEntityOverTableTest(AssertsCompiledSQL, fixtures.MappedTest):
__dialect__ = 'default'
run_create_tables = None
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table('a', metadata,
Column('id', Integer, primary_key=True),
Column('name', String)
)
Table('b', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True)
)
Table('c', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True),
Column('bid', Integer, ForeignKey('b.id'))
)
Table('d', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True),
Column('cid', Integer, ForeignKey('c.id'))
)
@classmethod
def setup_classes(cls):
class A(cls.Comparable):
pass
class B(A):
pass
class C(A):
pass
class D(A):
pass
@classmethod
def setup_mappers(cls):
A, B, C, D = cls.classes.A, cls.classes.B, cls.classes.C, cls.classes.D
a, b, c, d = cls.tables.a, cls.tables.b, cls.tables.c, cls.tables.d
mapper(A, a)
mapper(B, b, inherits=A)
mapper(C, c, inherits=A)
mapper(D, d, inherits=A)
def _two_join_fixture(self):
A, B, C, D = self.classes.A, self.classes.B, self.classes.C, self.classes.D
s = Session()
return s.query(B.name, C.name, D.name).select_from(B).\
join(C, C.bid == B.id).\
join(D, D.cid == C.id)
def test_two_joins_adaption(self):
a, b, c, d = self.tables.a, self.tables.b, self.tables.c, self.tables.d
q = self._two_join_fixture()
btoc = q._from_obj[0].left
ac_adapted = btoc.right.element.left
c_adapted = btoc.right.element.right
is_(ac_adapted.element, a)
is_(c_adapted.element, c)
ctod = q._from_obj[0].right
ad_adapted = ctod.left
d_adapted = ctod.right
is_(ad_adapted.element, a)
is_(d_adapted.element, d)
bname, cname, dname = q._entities
b_name_adapted = bname._resolve_expr_against_query_aliases(
q, bname.column, None)
c_name_adapted = cname._resolve_expr_against_query_aliases(
q, cname.column, None)
d_name_adapted = dname._resolve_expr_against_query_aliases(
q, dname.column, None)
assert bool(b_name_adapted == a.c.name)
assert bool(c_name_adapted == ac_adapted.c.name)
assert bool(d_name_adapted == ad_adapted.c.name)
def test_two_joins_sql(self):
q = self._two_join_fixture()
self.assert_compile(q,
"SELECT a.name AS a_name, a_1.name AS a_1_name, "
"a_2.name AS a_2_name "
"FROM a JOIN b ON a.id = b.id JOIN "
"(a AS a_1 JOIN c AS c_1 ON a_1.id = c_1.id) ON c_1.bid = b.id "
"JOIN (a AS a_2 JOIN d AS d_1 ON a_2.id = d_1.id) "
"ON d_1.cid = c_1.id"
)<|fim▁end|> |
engineers_to_org = Table('engineers_to_org', metadata,
Column('org_id', Integer, |
<|file_name|>test_run_new.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# This file is part of the VecNet OpenMalaria Portal.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/vecnet/om
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from django.test.testcases import TestCase
from django.conf import settings
import run
from website.apps.ts_om.models import Simulation
class RunNewTest(TestCase):
def test_failure(self):
simulation = Simulation.objects.create()
simulation.set_input_file("")
run.main(simulation.id)
simulation.refresh_from_db()
self.assertEqual(simulation.status, Simulation.FAILED)
self.assertEqual("Exit code: 66", simulation.last_error_message)
model_stdout = simulation.model_stdout.read().decode("utf-8")
self.assertIn("XSD error", model_stdout)
self.assertIn("invalid document structure", model_stdout)
def test_success(self):
simulation = Simulation.objects.create()
with open(os.path.join(settings.BASE_DIR, "website", "apps", "ts_om", "tests", "data", "default.xml")) as fp:
simulation.set_input_file(fp)
run.main(simulation.id)<|fim▁hole|> self.assertEqual(simulation.status, Simulation.COMPLETE)
self.assertEqual("", simulation.last_error_message)
model_stdout = simulation.model_stdout.read().decode("utf-8")
self.assertIn("100%", model_stdout)
output = simulation.output_file.read().decode("utf-8")
self.assertNotEqual(output, "")
ctsout = simulation.ctsout_file.read().decode("utf-8")
self.assertNotEqual(ctsout, "")<|fim▁end|> | simulation.refresh_from_db() |
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>// build.rs
use std::env;
use std::fs::File;
use std::io::Write;
use std::path::Path;
use glsl::syntax::{SingleDeclaration,
StorageQualifier, TypeQualifierSpec };
use glsl::visitor::{Visit, Visitor};
struct Counter {
var_nb: usize
}
impl Visitor for Counter {
// fn visit_identifier(&mut self, id: &mut Identifier) -> Visit {
// print!("identifier: {}\n", id.0);
// Visit::Children
// }
fn visit_single_declaration(&mut self, decl: &mut SingleDeclaration) -> Visit {
let qs = &decl.ty
.qualifier
.as_ref().unwrap()
.qualifiers.0;
if qs.len() < 2 { return Visit::Parent; }
if qs[1] != TypeQualifierSpec::Storage(StorageQualifier::In) { return Visit::Parent; }
match &decl.name {
Some(str) => {
print!("declaration: {:?} {}\n", qs[1], str);
print!("let shader = gl.create_shader(t).jsok()?;\n",);
print!("gl.shader_source(&shader, code);\n");
print!("gl.compile_shader(&shader);\n");
self.var_nb += 1;
},
None => {}
}
Visit::Parent
}
}
fn main() {
let out_dir = env::var("OUT_DIR").unwrap();
let dest_path = Path::new(&out_dir).join("hello.rs");
let mut f = File::create(&dest_path).unwrap();
f.write_all(b"
pub fn message() -> &'static str {
\"Hello, World!\"
}
").unwrap();
let vs = "attribute vec3 coordinates;
void main(void) {
gl_Position = vec4(coordinates, 1.0);
}";
let fs = "
void main(void) {
gl_FragColor = vec4(1.0, 0.0, 0.0, 1);
}";
<|fim▁hole|> print!("gl.compile_shader(&vsshader);\n");
print!("let fscode = \"{}\";\n", fs);
print!("let fsshader = gl.create_shader(t).jsok()?;\n",);
print!("gl.shader_source(&fsshader, fscode);\n");
print!("gl.compile_shader(&fsshader);\n");
print!("}}");
//let stage = ShaderStage::parse(vs);
//assert!(stage.is_ok());
//let mut counter = Counter { var_nb: 0 };
//stage.expect("").visit(&mut counter);
}<|fim▁end|> | print!("fn shader_pbr() {{\n");
print!("let vscode = \"{}\";\n", vs);
print!("let vsshader = gl.create_shader(t).jsok()?;\n",);
print!("gl.shader_source(&vsshader, vscode);\n"); |
<|file_name|>BrowserPermissionService.test.ts<|end_file_name|><|fim▁begin|>import {container} from "tsyringe";
import {BrowserPermissionService} from "../../../src/service/browser/BrowserPermissionService";
describe("BrowserPermissionServiceTest", (): void => {
let testee: BrowserPermissionService;
beforeEach((): void => {
container.reset();
testee = container.resolve(BrowserPermissionService);
});
test("testRequestUrlPermission", async (): Promise<void> => {
mockBrowser.permissions.request //
.expect({origins: []}) //
.andResolve(true) //
.times(1);
const result = testee.requestUrlPermission([]);
await expect(result).resolves.toBeTruthy();
});
test("testRequestUrlPermissionFailed", async (): Promise<void> => {
const error = new Error();
mockBrowser.permissions.request //
.expect({origins: []}) //
.andReject(error) //
.times(1);
const result = testee.requestUrlPermission([]);<|fim▁hole|>
await expect(result).rejects.toBe(error);
});
test("testGetAllUrlPermissions", async (): Promise<void> => {
const error = new Error();
mockBrowser.permissions.getAll //
.expect //
.andResolve({origins: ["test"]}) //
.times(1);
const permissions = await testee.getAllUrlPermissions();
expect(permissions).toEqual(["test"]);
});
test("testRemoveUrlPermissions", async (): Promise<void> => {
const error = new Error();
mockBrowser.permissions.remove //
.expect({origins: ["test"]}) //
.andResolve(true) //
.times(1);
const permissions = await testee.removeUrlPermissions(["test"]);
expect(permissions).toBeTruthy();
});
});<|fim▁end|> | |
<|file_name|>worker.js<|end_file_name|><|fim▁begin|>var http = require('http')
, https = require('https')
, url = require('url')
, vm = require('vm')
, cluster = require('cluster')
, util = require('util')
, haikuConsole = require('./haikuConsole.js')
, sandbox = require('./sandbox.js')
var shutdown
, shutdownInProgress = false
, requestCount = 0
, argv
process.on('message', function (msg) {
process.send({ response: msg.challange });
})
.on('uncaughtException', function (err) {
log('Entering shutdown mode after an uncaught exception: '
+ (err.message || err) + (err.stack ? '\n' + err.stack : ''));
initiateShutdown();
});
function log(thing) {
console.log(process.pid + ': ' + thing);
}
function shutdownNext() {
if (shutdown) {
clearTimeout(shutdown);
shutdown = undefined;
}
process.nextTick(function() {
log('Recycling self. Active connections: TCP: ' + httpServer.connections + ', TLS: ' + httpsServer.connections);
process.exit();
});
}
// raised by HTTP or HTTPS server when one of the client connections closes
function onConnectionClose() {
if (shutdownInProgress && 0 === (httpServer.connections + httpsServer.connections))
shutdownNext()
}
function initiateShutdown() {
if (!shutdownInProgress) {
// stop accepting new requests
httpServer.close();
httpsServer.close();
shutdownInProgress = true;
if (0 === (httpServer.connections + httpsServer.connections)) {
// there are no active connections - shut down now
shutdownNext();
}
else {
// Shut down when all active connections close (see onConnectionClose above)
// or when the graceful shutdown timeout expires, whichever comes first.
// Graceful shutdown timeout is twice the handler processing timeout.
shutdown = setTimeout(shutdownNext, argv.t * 2);
}
}
}
function onRequestFinished(context) {
if (!context.finished) {
context.finished = true;
context.req.socket.end(); // force buffers to be flushed
}
}
function haikuError(context, status, error) {
log(new Date() + ' Status: ' + status + ', Request URL: ' + context.req.url + ', Error: ' + error);
try {
context.req.resume();
context.res.writeHead(status);
if (error && 'HEAD' !== context.req.method)
context.res.end((typeof error === 'string' ? error : JSON.stringify(error)) + '\n');
else
context.res.end();
}
catch (e) {
// empty
}
onRequestFinished(context);
}
function limitExecutionTime(context) {
// setup timeout for request processing
context.timeout = setTimeout(function () {
delete context.timeout;
haikuError(context, 500, 'Handler ' + context.handlerName + ' did not complete within the time limit of ' + argv.t + 'ms');
onRequestFinished(context);
}, argv.t); // handler processing timeout
// intercept end of response to cancel the timeout timer and
// speed up shutdown if one is in progress
context.res.end = sandbox.wrapFunction(context.res, 'end', function () {
var result = arguments[--arguments.length].apply(this, arguments);
if (context.timeout) {
clearTimeout(context.timeout);
delete context.timeout;
onRequestFinished(context);
}
return result;
});
}
function executeHandler(context) {
log(new Date() + ' executing ' + context.handlerName);
// limit execution time of the handler to the preconfigured value
limitExecutionTime(context);
// expose rigged console through sandbox
var sandboxAddons = {
console: haikuConsole.createConsole(context, argv.l, argv.d)
}
// evaluate handler code in strict mode to prevent stack walking from untrusted code
context.handler = "'use strict';" + context.handler;
context.req.resume();
try {
vm.runInNewContext(context.handler, sandbox.createSandbox(context, sandboxAddons), context.handlerName);
}
catch (e) {
haikuError(context, 500, 'Handler ' + context.handlerName + ' generated an exception at runtime: '
+ (e.message || e) + (e.stack ? '\n' + e.stack : ''));
}
}
function resolveHandler(context) {
if (!context.handlerName)
return haikuError(context, 400,
'The x-haiku-handler HTTP request header or query parameter must specify the URL of the scriptlet to run.');
try {
context.handlerUrl = url.parse(context.handlerName);
}
catch (e) {
return haikuError(context, 400, 'The x-haiku-handler parameter must be a valid URL that resolves to a JavaScript scriptlet.');
}
var engine;
if (context.handlerUrl.protocol === 'http:') {
engine = http;
context.handlerUrl.port = context.handlerUrl.port || 80;
}
else if (context.handlerUrl.protocol === 'https:') {
engine = https;
context.handlerUrl.port = context.handlerUrl.port || 443;
}
else
return haikuError(context, 400, 'The x-haiku-handler parameter specifies unsupported protocol. Only http and https are supported.');
<|fim▁hole|> context.handler = '';
var length = 0;
res.on('data', function(chunk) {
length += chunk.length;
if (length > argv.i) {
handlerRequest.abort();
return haikuError(context, 400, 'The size of the handler exceeded the quota of ' + argv.i + ' bytes.');
}
context.handler += chunk;
})
.on('end', function() {
if (res.statusCode === 200)
executeHandler(context);
else if (res.statusCode === 302 && context.redirect < 3) {
context.handlerName = res.headers['location'];
context.redirect++;
resolveHandler(context);
}
else
return haikuError(context, 400, 'HTTP error when obtaining handler code from ' + context.handlerName + ': ' + res.statusCode);
});
}
var processError = function(error) {
haikuError(context, 400, 'Unable to obtain HTTP handler code from ' + context.handlerName + ': ' + error);
}
if (argv.proxyHost) {
// HTTPS or HTTP request through HTTP proxy
http.request({ // establishing a tunnel
host: argv.proxyHost,
port: argv.proxyPort,
method: 'CONNECT',
path: context.handlerUrl.hostname + ':' + context.handlerUrl.port
}).on('connect', function(pres, socket, head) {
if (pres.statusCode !== 200)
return haikuError(context, 400, 'Unable to connect to the host ' + context.host);
else
handlerRequest = engine.get({
host: context.handlerUrl.hostname,
port: context.handlerUrl.port,
path: context.handlerUrl.path,
socket: socket, // using a tunnel
agent: false // cannot use a default agent
}, processResponse).on('error', processError);
}).on('error', processError).end();
}
else // no proxy
handlerRequest = engine.get({
host: context.handlerUrl.hostname,
port: context.handlerUrl.port,
path: context.handlerUrl.path
}, processResponse).on('error', processError);
}
function getHaikuParam(context, name, defaultValue) {
return context.req.headers[name] || context.reqUrl.query[name] || defaultValue;
}
function processRequest(req, res) {
if (req.url === '/favicon.ico')
return haikuError({ req: req, res: res}, 404);
if (!shutdownInProgress && argv.r > 0 && ++requestCount >= argv.r) {
log('Entering shutdown mode after reaching request quota. Current active connections: TCP: '
+ httpServer.connections + ', TLS: ' + httpsServer.connections);
initiateShutdown();
}
req.pause();
var context = {
req: req,
res: res,
redirect: 0,
reqUrl: url.parse(req.url, true)
}
context.handlerName = getHaikuParam(context, 'x-haiku-handler');
context.console = getHaikuParam(context, 'x-haiku-console', 'none');
resolveHandler(context);
}
exports.main = function(args) {
argv = args;
// enter module sandbox - from now on all module requires in this process will
// be subject to sandboxing
sandbox.enterModuleSandbox();
httpServer = http.createServer(processRequest)
.on('connection', function(socket) {
socket.on('close', onConnectionClose)
})
.listen(argv.p);
httpsServer = https.createServer({ cert: argv.cert, key: argv.key }, processRequest)
.on('connection', function(socket) {
socket.on('close', onConnectionClose)
})
.listen(argv.s);
}<|fim▁end|> | var handlerRequest;
var processResponse = function(res) { |
<|file_name|>end.js<|end_file_name|><|fim▁begin|>const EventEmitter = require('events');
/**
* Ends the session. Uses session protocol command.
*
* @example
* this.demoTest = function (browser) {
* browser.end();
* };
*
* @method end<|fim▁hole|> * @param {function} [callback] Optional callback function to be called when the command finishes.
* @see session
* @api protocol.sessions
*/
class End extends EventEmitter {
command(callback) {
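    // delete the active session via the session protocol command if one exists; otherwise just complete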
const client = this.client;
if (this.api.sessionId) {
this.api.session('delete', result => {
client.session.clearSession();
client.setApiProperty('sessionId', null);
this.complete(callback, result);
});
} else {
setImmediate(() => {
this.complete(callback, null);
});
}
return this.client.api;
}
complete(callback, result) {
if (typeof callback === 'function') {
callback.call(this.api, result);
}
this.emit('complete');
}
}
module.exports = End;<|fim▁end|> | * @syntax .end([callback]) |
<|file_name|>TODO.py<|end_file_name|><|fim▁begin|># mesa - toolkit for building dynamic python apps with zero downtime
# basis: package is inspected for all instances of specified abc and each added to internal mesa list
# a Casa is a mesa obj instantiated as the holder of the dynamic obj list, one for each abc type in the specified package
# m = mesa.Casa(hideExceptions=False) - the parameter instructs whether to generate an exception when checking the existence of methods to run against the abc method list
# Mesa.run('method name') = executes the named method against each concrete class in the package; does a check to ensure the method name exists in the abc
# Mesa.generate('method name') = a generator that yields the results from calls to the specified function name in each concrete class. also checks the method name against the abc
#
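# rough usage sketch (API as described above, names assumed):
#   m = mesa.Casa(hideExceptions=False)
#   m.run('setup')                   # invoke setup() on every concrete class found in the package
#   for result in m.generate('render'):
#       print(result)                # generator variant yields each concrete class's return value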
# house recipes
# event driven message-passing based app framework - each casa contains a specific route or flow
# wsgi based simple mvc web framework using 2bit as templating language. single casa for all pages
# DOTO: decide best way to test, some scenarios require file io but no clicks required - simple unit tests
# DOTO: generate is a generator yielding a dictionary of results
# DOTO: check flickoutr and how to dynamically create classes with parameters
# DOTO: auth - way to supply callback for required input fields collection from ui
# DOTO: base.Casa appears to pass its own instance as self to called module. Unsure what side effects are?
# DOTO: utility interface to implement by client app to take care of input for each specific data type
# DOTO: accompanying Method utility that once required args are declared once, elegant handling <|fim▁hole|># TODO: mesa test suit scenarios:
# build a casa, add class, rebuild casa
# build casa, call method not in abc
# build casa with concrete class not implementing an abc method<|fim▁end|> | # ie no passing from interface to host back to interface like it is in unit test right now
# TODO: meta methods that build on the basic iterating methods to abstract away iteration from caller
# TODO: check for abc type conformance
# TODO: at minute convention is that dynamic module contains one class of same name. Change to support all/others |
<|file_name|>improviser.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
*** Description ***
Converts a progression to chords, orchestrates them and plays
them using fluidsynth.
Make sure to set SF2 to a valid soundfont file.
Based on play_progression.py
"""
from mingus.core import progressions, intervals
from mingus.core import chords as ch
from mingus.containers import NoteContainer, Note
from mingus.midi import fluidsynth
import time
import sys
from random import random, choice, randrange
SF2 = 'soundfont.sf2'
progression = ['I', 'bVdim7']
# progression = ["I", "vi", "ii", "iii7", "I7", "viidom7", "iii7",
# "V7"]
key = 'C'
# If True every second iteration will be played in double time, starting on the
# first
double_time = True
orchestrate_second = True
swing = True
play_solo = True
play_drums = True
play_bass = True
play_chords = True
bar_length = 1.75
song_end = 28
# Control beginning of solos and chords
solo_start = 8
solo_end = 20
chord_start = 16
chord_end = 24
# Channels
chord_channel = 1
chord_channel2 = 7
chord_channel3 = 3
bass_channel = 4
solo_channel = 13
random_solo_channel = False
if not fluidsynth.init(SF2):
print "Couldn't load soundfont", SF2
sys.exit(1)
chords = progressions.to_chords(progression, key)
loop = 1
while loop < song_end:
i = 0
if random_solo_channel:
solo_channel = choice(range(5, 8) + [11])
for chord in chords:
c = NoteContainer(chords[i])
l = Note(c[0].name)
n = Note('C')
l.octave_down()
l.octave_down()
print ch.determine(chords[i])[0]
if not swing and play_chords and loop > chord_start and loop\
< chord_end:
fluidsynth.play_NoteContainer(c, chord_channel, randrange(50, 75))
if play_chords and loop > chord_start and loop < chord_end:
if orchestrate_second:
if loop % 2 == 0:
fluidsynth.play_NoteContainer(c, chord_channel2,
randrange(50, 75))
else:
fluidsynth.play_NoteContainer(c, chord_channel2, randrange(50,
75))
if double_time:
beats = [random() > 0.5 for x in range((loop % 2 + 1) * 8)]
else:
beats = [random() > 0.5 for x in range(8)]
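        # beats: one random boolean per subdivision of the bar; True marks ticks where a solo note may be played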
t = 0
for beat in beats:
# Play random note
if beat and play_solo and loop > solo_start and loop < solo_end:
fluidsynth.stop_Note(n)
if t % 2 == 0:
n = Note(choice(c).name)
elif random() > 0.5:
if random() < 0.46:
n = Note(intervals.second(choice(c).name, key))
elif random() < 0.46:
n = Note(intervals.seventh(choice(c).name, key))
else:
n = Note(choice(c).name)
if t > 0 and t < len(beats) - 1:
if beats[t - 1] and not beats[t + 1]:<|fim▁hole|> print n
# Repeat chord on half of the bar
if play_chords and t != 0 and loop > chord_start and loop\
< chord_end:
if swing and random() > 0.95:
fluidsynth.play_NoteContainer(c, chord_channel3,
randrange(20, 75))
elif t % (len(beats) / 2) == 0 and t != 0:
fluidsynth.play_NoteContainer(c, chord_channel3,
randrange(20, 75))
# Play bass note
if play_bass and t % 4 == 0 and t != 0:
l = Note(choice(c).name)
l.octave_down()
l.octave_down()
fluidsynth.play_Note(l, bass_channel, randrange(50, 75))
elif play_bass and t == 0:
fluidsynth.play_Note(l, bass_channel, randrange(50, 75))
# Drums
if play_drums and loop > 0:
if t % (len(beats) / 2) == 0 and t != 0:
fluidsynth.play_Note(Note('E', 2), 9, randrange(50, 100)) # snare
else:
if random() > 0.8 or t == 0:
fluidsynth.play_Note(Note('C', 2), 9, randrange(20,
100)) # bass
if t == 0 and random() > 0.75:
fluidsynth.play_Note(Note('C#', 3), 9, randrange(60, 100)) # crash
if swing:
if random() > 0.9:
fluidsynth.play_Note(Note('A#', 2), 9, randrange(50,
100)) # hihat open
elif random() > 0.6:
fluidsynth.play_Note(Note('G#', 2), 9, randrange(100,
120)) # hihat closed
if random() > 0.95:
fluidsynth.play_Note(Note('E', 2), 9, 100) # snare
elif t % 2 == 0:
fluidsynth.play_Note(Note('A#', 2), 9, 100) # hihat open
else:
if random() > 0.9:
fluidsynth.play_Note(Note('E', 2), 9, 100) # snare
if swing:
if t % 2 == 0:
time.sleep((bar_length / (len(beats) * 3)) * 4)
else:
time.sleep((bar_length / (len(beats) * 3)) * 2)
else:
time.sleep(bar_length / len(beats))
t += 1
fluidsynth.stop_NoteContainer(c, chord_channel)
fluidsynth.stop_NoteContainer(c, chord_channel2)
fluidsynth.stop_NoteContainer(c, chord_channel3)
fluidsynth.stop_Note(l, bass_channel)
fluidsynth.stop_Note(n, solo_channel)
i += 1
print '-' * 20
loop += 1<|fim▁end|> | n = Note(choice(c).name)
fluidsynth.play_Note(n, solo_channel, randrange(80, 110)) |
<|file_name|>pyborg.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# PyBorg: The python AI bot.
#
# Copyright (c) 2000, 2006 Tom Morton, Sebastien Dailly
#
#
# This bot was inspired by the PerlBorg, by Eric Bock.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Tom Morton <[email protected]>
# Seb Dailly <[email protected]>
#
from random import *
import ctypes
import sys
import os
import fileinput
import marshal # buffered marshal is bloody fast. wish i'd found this before :)
import struct
import time
import zipfile
import re
import threading
timers_started = False
def to_sec(s):
seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
return int(s[:-1])*seconds_per_unit[s[-1]]
# This will make the !learn and !teach magic work ;)
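# qdb.dat format: one trigger per line, with one or more responses separated by ':=:'
# e.g.  hello:=:hi #nick:=:hey there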
def dbread(key):
value = None
if os.path.isfile("qdb.dat"):
file = open("qdb.dat")
for line in file.readlines():
reps = int(len(line.split(":=:"))-1)
data = line.split(":=:")[0]
dlen = r'\b.{2,}\b'
if re.search(dlen, key, re.IGNORECASE):
if key.lower() in data.lower() or data.lower() in key.lower():
if reps > 1:
repnum = randint(1, int(reps))
value = line.split(":=:")[repnum].strip()
else: value = line.split(":=:")[1].strip()
break
else:
value = None
break
file.close()
return value
def dbwrite(key, value):
if dbread(key) is None:
file = open("qdb.dat", "a")
file.write(str(key)+":=:"+str(value)+"\n")
file.close()
else:
for line in fileinput.input("qdb.dat",inplace=1):
data = line.split(":=:")[0]
dlen = r'\b.{2,}\b'
if re.search(dlen, key, re.IGNORECASE):
if key.lower() in data.lower() or data.lower() in key.lower():
print str(line.strip())+":=:"+str(value)
else:
print line.strip()
# Some more magic to fix some common issues with the teach system
def teach_filter(message):
message = message.replace("||", "$C4")
message = message.replace("|-:", "$b7")
message = message.replace(":-|", "$b6")
message = message.replace(";-|", "$b5")
message = message.replace("|:", "$b4")
message = message.replace(";|", "$b3")
message = message.replace("=|", "$b2")
message = message.replace(":|", "$b1")
return message
def unfilter_reply(message):
"""
This undoes the phrase mangling the central code does
so the bot sounds more human :P
"""
# Had to write my own initial capitalizing code *sigh*
message = "%s%s" % (message[:1].upper(), message[1:])
# Fixes punctuation
message = message.replace(" ?", "?")
message = message.replace(" !", "!")
message = message.replace(" .", ".")
message = message.replace(" ,", ",")
message = message.replace(" : ", ": ")
message = message.replace(" ; ", "; ")
# Fixes I and I contractions
message = message.replace(" i ", " I ")
message = message.replace("i'", "I'")
# Fixes the common issues with the teach system
message = message.replace("$C4", "||")
message = message.replace("$b7", "|-:")
message = message.replace("$b6", ";-|")
message = message.replace("$b5", ":-|")
message = message.replace("$b4", "|:")
message = message.replace("$b3", ";|")
message = message.replace("$b2", "=|")
message = message.replace("$b1", ":|")
# Fixes emoticons that don't work in lowercase
emoticon = re.search("(:|x|;|=|8){1}(-)*(p|x|d){1}", message, re.IGNORECASE)
if not emoticon == None:
emoticon = "%s" % emoticon.group()
message = message.replace(emoticon, emoticon.upper())
# Fixes the annoying XP capitalization in words...
message = message.replace("XP", "xp")
message = message.replace(" xp", " XP")
message = message.replace("XX", "xx")
return message
def filter_message(message, bot):
"""
Filter a message body so it is suitable for learning from and
replying to. This involves removing confusing characters,
padding ? and ! with ". " so they also terminate lines
and converting to lower case.
"""
# remove garbage
message = message.replace("\"", "") # remove "s
message = message.replace("\n", " ") # remove newlines
message = message.replace("\r", " ") # remove carriage returns
# remove matching brackets (unmatched ones are likely smileys :-) *cough*
# should except out when not found.
index = 0
try:
while 1:
index = message.index("(", index)
# Remove matching ) bracket
i = message.index(")", index+1)
message = message[0:i]+message[i+1:]
# And remove the (
message = message[0:index]+message[index+1:]
except ValueError, e:
pass
# Strips out mIRC Control codes
ccstrip = re.compile("\x1f|\x02|\x12|\x0f|\x16|\x03(?:\d{1,2}(?:,\d{1,2})?)?", re.UNICODE)
message = ccstrip.sub("", message)
# Few of my fixes...
message = message.replace(": ", " : ")
message = message.replace("; ", " ; ")
# ^--- because some : and ; might be smileys...
message = message.replace("`", "'")
message = message.replace("?", " ? ")
message = message.replace("!", " ! ")
message = message.replace(".", " . ")
message = message.replace(",", " , ")
# Fixes broken emoticons...
message = message.replace("^ . ^", "^.^")
message = message.replace("- . -", "-.-")
message = message.replace("0 . o", "0.o")
message = message.replace("o . o", "o.o")
message = message.replace("O . O", "O.O")
message = message.replace("< . <", "<.<")
message = message.replace("> . >", ">.>")
message = message.replace("> . <", ">.<")
message = message.replace(": ?", ":?")
message = message.replace(":- ?", ":-?")
message = message.replace(", , l , ,", ",,l,,")
message = message.replace("@ . @", "@.@")
words = message.split()
if bot.settings.process_with == "pyborg":
for x in xrange(0, len(words)):
            # are there any aliases?
for z in bot.settings.aliases.keys():
for alias in bot.settings.aliases[z]:
pattern = "^%s$" % alias
if re.search(pattern, words[x]):
words[x] = z
message = " ".join(words)
return message
class pyborg:
import re
import cfgfile
ver_string = "PyBorg version 1.1.0"
saves_version = "1.1.0"
# Main command list
commandlist = "Pyborg commands:\n!checkdict, !contexts, !help, !known, !learning, !rebuilddict, !replace, !unlearn, !purge, !version, !words, !limit, !alias, !save, !censor, !uncensor, !learn, !teach, !forget, !find, !responses"
commanddict = {
"help": "Owner command. Usage: !help [command]\nPrints information about using a command, or a list of commands if no command is given",
"version": "Usage: !version\nDisplay what version of Pyborg we are running",
"words": "Usage: !words\nDisplay how many words are known",
"known": "Usage: !known word1 [word2 [...]]\nDisplays if one or more words are known, and how many contexts are known",
"contexts": "Owner command. Usage: !contexts <phrase>\nPrint contexts containing <phrase>",
"unlearn": "Owner command. Usage: !unlearn <expression>\nRemove all occurances of a word or expression from the dictionary. For example '!unlearn of of' would remove all contexts containing double 'of's",
"purge": "Owner command. Usage: !purge [number]\nRemove all occurances of the words that appears in less than <number> contexts",
"replace": "Owner command. Usage: !replace <old> <new>\nReplace all occurances of word <old> in the dictionary with <new>",
"learning": "Owner command. Usage: !learning [on|off]\nToggle bot learning. Without arguments shows the current setting",
"checkdict": "Owner command. Usage: !checkdict\nChecks the dictionary for broken links. Shouldn't happen, but worth trying if you get KeyError crashes",
"rebuilddict": "Owner command. Usage: !rebuilddict\nRebuilds dictionary links from the lines of known text. Takes a while. You probably don't need to do it unless your dictionary is very screwed",
"censor": "Owner command. Usage: !censor [word1 [...]]\nPrevent the bot using one or more words. Without arguments lists the currently censored words",
"uncensor": "Owner command. Usage: !uncensor word1 [word2 [...]]\nRemove censorship on one or more words",
"limit": "Owner command. Usage: !limit [number]\nSet the number of words that pyBorg can learn",
"alias": "Owner command. Usage: !alias : Show the differents aliases\n!alias <alias> : show the words attached to this alias\n!alias <alias> <word> : link the word to the alias",
"learn": "Owner command. Usage: !learn trigger | response\nTeaches the bot to respond the any words similar to the trigger word or phrase with a certain response",
"teach": "Owner command. Usage: !teach trigger | response\nTeaches the bot to respond the any words similar to the trigger word or phrase with a certain response",
"forget": "Owner command. Usage: !forget trigger\nForces the bot to forget all previously learned responses to a certain trigger word or phrase",
"find": "Owner command. Usage: !find trigger\nFinds all matches to the trigger word or phrase and displays the amount of matches",
"responses": "Owner command. Usage: !responses\nDisplays the total number of trigger/response pairs the bot has learned"
}
def __init__(self):
"""
Open the dictionary. Resize as required.
"""
# Attempt to load settings
self.settings = self.cfgfile.cfgset()
self.settings.load("pyborg.cfg",
{ "num_contexts": ("Total word contexts", 0),
"num_words": ("Total unique words known", 0),
"max_words": ("max limits in the number of words known", 6000),
"learning": ("Allow the bot to learn", 1),
"ignore_list":("Words that can be ignored for the answer", ['!.', '?.', "'", ',', ';']),
"censored": ("Don't learn the sentence if one of those words is found", []),
"num_aliases":("Total of aliases known", 0),
"aliases": ("A list of similars words", {}),
"process_with":("Wich way for generate the reply ( pyborg|megahal)", "pyborg"),
"no_save" :("If True, Pyborg don't saves the dictionary and configuration on disk", "False")
} )
self.answers = self.cfgfile.cfgset()
self.answers.load("answers.txt",
{ "sentences": ("A list of prepared answers", {})
} )
self.unfilterd = {}
# Starts the timers:
global timers_started
if timers_started is False:
try:
self.autosave = threading.Timer(to_sec("125m"), self.save_all)
self.autosave.start()
self.autopurge = threading.Timer(to_sec("5h"), self.auto_optimise)
self.autopurge.start()
self.autorebuild = threading.Timer(to_sec("71h"), self.auto_rebuild)
self.autorebuild.start()
timers_started = True
except SystemExit, e:
self.autosave.cancel()
self.autopurge.cancel()
self.autorebuild.cancel()
if dbread("hello") is None:
dbwrite("hello", "hi #nick")
# Read the dictionary
if self.settings.process_with == "pyborg":
print "Reading dictionary..."
try:
zfile = zipfile.ZipFile('archive.zip','r')
for filename in zfile.namelist():
data = zfile.read(filename)
file = open(filename, 'w+b')
file.write(data)
file.close()
except (EOFError, IOError), e:
print "no zip found"
try:
f = open("version", "rb")
s = f.read()
f.close()
if s != self.saves_version:
                print "Error loading dictionary\nPlease convert it before launching pyborg"
sys.exit(1)
f = open("words.dat", "rb")
s = f.read()
f.close()
self.words = marshal.loads(s)
del s
f = open("lines.dat", "rb")
s = f.read()
f.close()
self.lines = marshal.loads(s)
del s
except (EOFError, IOError), e:
# Create new database
self.words = {}
self.lines = {}
print "Error reading saves. New database created."
# Is a resizing required?
if len(self.words) != self.settings.num_words:
print "Updating dictionary information..."
self.settings.num_words = len(self.words)
num_contexts = 0
# Get number of contexts
for x in self.lines.keys():
num_contexts += len(self.lines[x][0].split())
self.settings.num_contexts = num_contexts
# Save new values
self.settings.save()
# Is an aliases update required ?
compteur = 0
for x in self.settings.aliases.keys():
compteur += len(self.settings.aliases[x])
if compteur != self.settings.num_aliases:
print "check dictionary for new aliases"
self.settings.num_aliases = compteur
for x in self.words.keys():
                # are there any aliases?
if x[0] != '~':
for z in self.settings.aliases.keys():
for alias in self.settings.aliases[z]:
pattern = "^%s$" % alias
if self.re.search(pattern, x, re.IGNORECASE):
print "replace %s with %s" %(x, z)
self.replace(x, z)
for x in self.words.keys():
if not (x in self.settings.aliases.keys()) and x[0] == '~':
print "unlearn %s" % x
self.settings.num_aliases -= 1
self.unlearn(x)
print "unlearned aliases %s" % x
#unlearn words in the unlearn.txt file.
try:
f = open("unlearn.txt", "r")
while 1:
word = f.readline().strip('\n')
if word == "":
break
if self.words.has_key(word):
self.unlearn(word)
f.close()
except (EOFError, IOError), e:
# No words to unlearn
pass
self.settings.save()
def save_all(self, restart_timer = True):
if self.settings.process_with == "pyborg" and self.settings.no_save != "True":
print "Writing dictionary..."
nozip = "no"
try:
zfile = zipfile.ZipFile('archive.zip','r')
for filename in zfile.namelist():
data = zfile.read(filename)
file = open(filename, 'w+b')
file.write(data)
file.close()
except (OSError, IOError), e:
            print "no zip found. Is the program being launched for the first time?"
try:
os.remove('archive.zip')
except:
pass
f = open("words.dat", "wb")
s = marshal.dumps(self.words)
f.write(s)
f.close()
f = open("lines.dat", "wb")
s = marshal.dumps(self.lines)
f.write(s)
f.close()
#save the version
f = open("version", "w")
f.write(self.saves_version)
f.close()
#zip the files
f = zipfile.ZipFile('archive.zip','w',zipfile.ZIP_DEFLATED)
f.write('words.dat')
f.write('lines.dat')
f.write('version')
f.close()
try:
os.remove('words.dat')
os.remove('lines.dat')
os.remove('version')
except (OSError, IOError), e:
print "could not remove the files"
f = open("words.txt", "w")
            # write each known word
            wordlist = []
            # Sort the list before exporting
for key in self.words.keys():
wordlist.append([key, len(self.words[key])])
wordlist.sort(lambda x,y: cmp(x[1],y[1]))
map( (lambda x: f.write(str(x[0])+"\n\r") ), wordlist)
f.close()
f = open("sentences.txt", "w")
            # write each unfiltered sentence seen
            wordlist = []
            # Sort the list before exporting
for key in self.unfilterd.keys():
wordlist.append([key, self.unfilterd[key]])
wordlist.sort(lambda x,y: cmp(y[1],x[1]))
map( (lambda x: f.write(str(x[0])+"\n") ), wordlist)
f.close()
if restart_timer is True:
self.autosave = threading.Timer(to_sec("125m"), self.save_all)
self.autosave.start()
# Save settings
self.settings.save()
def auto_optimise(self):
if self.settings.process_with == "pyborg" and self.settings.learning == 1:
# Let's purge out words with little or no context every day to optimise the word list
t = time.time()
liste = []
compteur = 0
for w in self.words.keys():
digit = 0
char = 0
for c in w:
if c.isalpha():
char += 1
if c.isdigit():
digit += 1
try:
c = len(self.words[w])
except:
c = 2
if c < 2 or ( digit and char ):
liste.append(w)
compteur += 1
for w in liste[0:]:
self.unlearn(w)
# Restarts the timer:
self.autopurge = threading.Timer(to_sec("5h"), self.auto_optimise)
self.autopurge.start()
# Now let's save the changes to disk and be done ;)
self.save_all(False)
def auto_rebuild(self):
if self.settings.process_with == "pyborg" and self.settings.learning == 1:
t = time.time()
old_lines = self.lines
old_num_words = self.settings.num_words
old_num_contexts = self.settings.num_contexts
self.words = {}
self.lines = {}
self.settings.num_words = 0
self.settings.num_contexts = 0
for k in old_lines.keys():
self.learn(old_lines[k][0], old_lines[k][1])
# Restarts the timer
self.autorebuild = threading.Timer(to_sec("71h"), self.auto_rebuild)
self.autorebuild.start()
def kill_timers(self):
self.autosave.cancel()
self.autopurge.cancel()
self.autorebuild.cancel()
def process_msg(self, io_module, body, replyrate, learn, args, owner=0, not_quiet=1):
"""
Process message 'body' and pass back to IO module with args.
If owner==1 allow owner commands.
"""
try:
if self.settings.process_with == "megahal": import mh_python
except:
self.settings.process_with = "pyborg"
self.settings.save()
print "Could not find megahal python library\nProgram ending"
sys.exit(1)
# add trailing space so sentences are broken up correctly
body = body + " "
# Parse commands
if body[0] == "!":
self.do_commands(io_module, body, args, owner)
return
# Filter out garbage and do some formatting
body = filter_message(body, self)
# Learn from input
if learn == 1:
if self.settings.process_with == "pyborg":
self.learn(body)
elif self.settings.process_with == "megahal" and self.settings.learning == 1:
mh_python.learn(body)
# Make a reply if desired
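        # replyrate is the percent chance (0-100) that a reply is generated for this message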
if randint(0, 99) < replyrate:
message = ""
#Look if we can find a prepared answer
if dbread(body):
message = unfilter_reply(dbread(body))
elif not_quiet == 1:
for sentence in self.answers.sentences.keys():
pattern = "^%s$" % sentence
if re.search(pattern, body, re.IGNORECASE):
message = self.answers.sentences[sentence][randint(0, len(self.answers.sentences[sentence])-1)]
message = unfilter_reply(message)
break
else:
if body in self.unfilterd:
self.unfilterd[body] = self.unfilterd[body] + 1
else:
self.unfilterd[body] = 0
if message == "":
if self.settings.process_with == "pyborg":
message = self.reply(body)
message = unfilter_reply(message)
elif self.settings.process_with == "megahal":
message = mh_python.doreply(body)
else: return
# single word reply: always output
if len(message.split()) == 1:
io_module.output(message, args)
return
# empty. do not output
if message == "":
return
# else output
if len(message) >= 25:
# Quicker response time for long responses
time.sleep(5)
else:
time.sleep(.2*len(message))
io_module.output(message, args)
def do_commands(self, io_module, body, args, owner):
"""
        Respond to user commands.
"""
msg = ""
command_list = body.split()
command_list[0] = command_list[0].lower()
# Guest commands.
# Version string
if command_list[0] == "!version":
msg = self.ver_string
# Learn/Teach commands
if command_list[0] == "!teach" or command_list[0] == "!learn":
try:
key = ' '.join(command_list[1:]).split("|")[0].strip()
key = re.sub("[\.\,\?\*\"\'!]","", key)
rnum = int(len(' '.join(command_list[1:]).split("|"))-1)
if "#nick" in key:
msg = "Stop trying to teach me something that will break me!"
else:
value = teach_filter(' '.join(command_list[1:]).split("|")[1].strip())
dbwrite(key[0:], value[0:])
if rnum > 1:
array = ' '.join(command_list[1:]).split("|")
rcount = 1
for value in array:
if rcount == 1: rcount = rcount+1
else: dbwrite(key[0:], teach_filter(value[0:].strip()))
else:
value = ' '.join(command_list[1:]).split("|")[1].strip()
dbwrite(key[0:], teach_filter(value[0:]))
msg = "New response learned for %s" % key
except: msg = "I couldn't learn that!"
# Forget command
if command_list[0] == "!forget":
if os.path.isfile("qdb.dat"):
try:
key = ' '.join(command_list[1:]).strip()
for line in fileinput.input("qdb.dat" ,inplace =1):
data = line.split(":=:")[0]
dlen = r'\b.{2,}\b'
if re.search(dlen, key, re.IGNORECASE):
if key.lower() in data.lower() or data.lower() in key.lower():
pass
else: print line.strip()
msg = "I've forgotten %s" % key
except: msg = "Sorry, I couldn't forget that!"
else: msg = "You have to teach me before you can make me forget it!"
# Find response command
if command_list[0] == "!find":
if os.path.isfile("qdb.dat"):
rcount = 0
matches = ""
key = ' '.join(command_list[1:]).strip()
file = open("qdb.dat")
for line in file.readlines():
data = line.split(":=:")[0]
dlen = r'\b.{2,}\b'
if re.search(dlen, key, re.IGNORECASE):
if key.lower() in data.lower() or data.lower() in key.lower():
                            if key.lower() == "": pass
else:
rcount = rcount+1
if matches == "": matches = data
else: matches = matches+", "+data
file.close()
if rcount < 1: msg = "I have no match for %s" % key
elif rcount == 1: msg = "I found 1 match: %s" % matches
else: msg = "I found %d matches: %s" % (rcount, matches)
else: msg = "You need to teach me something first!"
if command_list[0] == "!responses":
if os.path.isfile("qdb.dat"):
rcount = 0
file = open("qdb.dat")
for line in file.readlines():
                    if line == "": pass
else: rcount = rcount+1
file.close()
if rcount < 1: msg = "I've learned no responses"
elif rcount == 1: msg = "I've learned only 1 response"
else: msg = "I've learned %d responses" % rcount
else: msg = "You need to teach me something first!"
# How many words do we know?
elif command_list[0] == "!words" and self.settings.process_with == "pyborg":
num_w = self.settings.num_words
num_c = self.settings.num_contexts
num_l = len(self.lines)
if num_w != 0:
num_cpw = num_c/float(num_w) # contexts per word
else:
num_cpw = 0.0
msg = "I know %d words (%d contexts, %.2f per word), %d lines." % (num_w, num_c, num_cpw, num_l)
# Do i know this word
elif command_list[0] == "!known" and self.settings.process_with == "pyborg":
if len(command_list) == 2:
# single word specified
word = command_list[1].lower()
if self.words.has_key(word):
c = len(self.words[word])
msg = "%s is known (%d contexts)" % (word, c)
else:
msg = "%s is unknown." % word
elif len(command_list) > 2:
# multiple words.
words = []
for x in command_list[1:]:
words.append(x.lower())
msg = "Number of contexts: "
for x in words:
if self.words.has_key(x):
c = len(self.words[x])
msg += x+"/"+str(c)+" "
else:
msg += x+"/0 "
# Owner commands
if owner == 1:
# Save dictionary
if command_list[0] == "!save":
self.save_all()
msg = "Dictionary saved"
# Command list
elif command_list[0] == "!help":
if len(command_list) > 1:
# Help for a specific command
cmd = command_list[1].lower()
dic = None
if cmd in self.commanddict.keys():
dic = self.commanddict
elif cmd in io_module.commanddict.keys():
dic = io_module.commanddict
if dic:
for i in dic[cmd].split("\n"):
io_module.output(i, args)
else:
msg = "No help on command '%s'" % cmd
else:
for i in self.commandlist.split("\n"):
io_module.output(i, args)
for i in io_module.commandlist.split("\n"):
io_module.output(i, args)
# Change the max_words setting
elif command_list[0] == "!limit" and self.settings.process_with == "pyborg":
msg = "The max limit is "
if len(command_list) == 1:
msg += str(self.settings.max_words)
else:
limit = int(command_list[1].lower())
self.settings.max_words = limit
msg += "now " + command_list[1]
# Check for broken links in the dictionary
elif command_list[0] == "!checkdict" and self.settings.process_with == "pyborg":
t = time.time()
num_broken = 0
num_bad = 0
for w in self.words.keys():
wlist = self.words[w]
for i in xrange(len(wlist)-1, -1, -1):
line_idx, word_num = struct.unpack("iH", wlist[i])
# Nasty critical error we should fix
if not self.lines.has_key(line_idx):
print "Removing broken link '%s' -> %d" % (w, line_idx)
num_broken = num_broken + 1
del wlist[i]
else:
# Check pointed to word is correct
split_line = self.lines[line_idx][0].split()
if split_line[word_num] != w:
print "Line '%s' word %d is not '%s' as expected." % \
(self.lines[line_idx][0],
word_num, w)
num_bad = num_bad + 1
del wlist[i]
if len(wlist) == 0:
del self.words[w]
self.settings.num_words = self.settings.num_words - 1
print "\"%s\" vaped totally" % w
msg = "Checked dictionary in %0.2fs. Fixed links: %d broken, %d bad." % \
(time.time()-t,
num_broken,
num_bad)
# Rebuild the dictionary by discarding the word links and
# re-parsing each line
elif command_list[0] == "!rebuilddict" and self.settings.process_with == "pyborg":
if self.settings.learning == 1:
t = time.time()
old_lines = self.lines
old_num_words = self.settings.num_words
old_num_contexts = self.settings.num_contexts
self.words = {}
self.lines = {}
self.settings.num_words = 0
self.settings.num_contexts = 0
for k in old_lines.keys():
self.learn(old_lines[k][0], old_lines[k][1])
msg = "Rebuilt dictionary in %0.2fs. Words %d (%+d), contexts %d (%+d)" % \
(time.time()-t,
old_num_words,
self.settings.num_words - old_num_words,
old_num_contexts,
self.settings.num_contexts - old_num_contexts)
#Remove rares words
elif command_list[0] == "!purge" and self.settings.process_with == "pyborg":
t = time.time()
liste = []
compteur = 0
if len(command_list) == 2:
                    # maximum number of occurrences to purge
c_max = command_list[1].lower()
else:
c_max = 0
c_max = int(c_max)
for w in self.words.keys():
digit = 0
char = 0
for c in w:
if c.isalpha():
char += 1
if c.isdigit():
digit += 1
                    # count the words below this limit
c = len(self.words[w])
if c < 2 or ( digit and char ):
liste.append(w)
compteur += 1
if compteur == c_max:
break
if c_max < 1:
#io_module.output(str(compteur)+" words to remove", args)
io_module.output("%s words to remove" %compteur, args)
return
                # remove the words
for w in liste[0:]:
self.unlearn(w)
msg = "Purge dictionary in %0.2fs. %d words removed" % \
(time.time()-t,
compteur)
# Change a typo in the dictionary
elif command_list[0] == "!replace" and self.settings.process_with == "pyborg":
if len(command_list) < 3:
return
old = command_list[1].lower()
new = command_list[2].lower()
msg = self.replace(old, new)
# Print contexts [flooding...:-]
elif command_list[0] == "!contexts" and self.settings.process_with == "pyborg":
# This is a large lump of data and should
# probably be printed, not module.output XXX
# build context we are looking for
context = " ".join(command_list[1:])
context = context.lower()
if context == "":
return
io_module.output("Contexts containing \""+context+"\":", args)
# Build context list
# Pad it
context = " "+context+" "
c = []
# Search through contexts
for x in self.lines.keys():
# get context
ctxt = self.lines[x][0]
# add leading whitespace for easy sloppy search code
ctxt = " "+ctxt+" "
if ctxt.find(context) != -1:
# Avoid duplicates (2 of a word
# in a single context)
if len(c) == 0:
c.append(self.lines[x][0])
elif c[len(c)-1] != self.lines[x][0]:
c.append(self.lines[x][0])
x = 0
while x < 5:
if x < len(c):
io_module.output(c[x], args)
x += 1
if len(c) == 5:
return
if len(c) > 10:
io_module.output("...("+`len(c)-10`+" skipped)...", args)
x = len(c) - 5
if x < 5:
x = 5
while x < len(c):
io_module.output(c[x], args)
x += 1
# Remove a word from the vocabulary [use with care]
elif command_list[0] == "!unlearn" and self.settings.process_with == "pyborg":
# build context we are looking for
context = " ".join(command_list[1:])
context = context.lower()
if context == "":
return
print "Looking for: "+context
# Unlearn contexts containing 'context'
t = time.time()
self.unlearn(context)
# we don't actually check if anything was
# done..
msg = "Unlearn done in %0.2fs" % (time.time()-t)
# Query/toggle bot learning
elif command_list[0] == "!learning":
msg = "Learning mode "
if len(command_list) == 1:
if self.settings.learning == 0:
msg += "off"
else:
msg += "on"
else:
toggle = command_list[1].lower()
if toggle == "on":
msg += "on"
self.settings.learning = 1
else:
msg += "off"
self.settings.learning = 0
# add a word to the 'censored' list
elif command_list[0] == "!censor" and self.settings.process_with == "pyborg":
# no arguments. list censored words
if len(command_list) == 1:
if len(self.settings.censored) == 0:
msg = "No words censored"
else:
msg = "I will not use the word(s) %s" % ", ".join(self.settings.censored)
# add every word listed to censored list
else:
for x in xrange(1, len(command_list)):
if command_list[x] in self.settings.censored:
msg += "%s is already censored" % command_list[x]
else:
self.settings.censored.append(command_list[x].lower())
self.unlearn(command_list[x])
msg += "done"
msg += "\n"
# remove a word from the censored list
elif command_list[0] == "!uncensor" and self.settings.process_with == "pyborg":
# Remove everyone listed from the ignore list
# eg !unignore tom dick harry
for x in xrange(1, len(command_list)):
try:
self.settings.censored.remove(command_list[x].lower())
msg = "done"
except ValueError, e:
pass
elif command_list[0] == "!alias" and self.settings.process_with == "pyborg":
# no arguments. list aliases words
if len(command_list) == 1:
if len(self.settings.aliases) == 0:
msg = "No aliases"
else:
msg = "I will alias the word(s) %s" \
% ", ".join(self.settings.aliases.keys())
# add every word listed to alias list
elif len(command_list) == 2:
if command_list[1][0] != '~': command_list[1] = '~' + command_list[1]
if command_list[1] in self.settings.aliases.keys():
                        msg = "Those words : %s are aliases to %s" \
% ( " ".join(self.settings.aliases[command_list[1]]), command_list[1] )
else:
msg = "The alias %s is not known" % command_list[1][1:]
elif len(command_list) > 2:
#create the aliases
msg = "The words : "
if command_list[1][0] != '~': command_list[1] = '~' + command_list[1]
if not(command_list[1] in self.settings.aliases.keys()):
self.settings.aliases[command_list[1]] = [command_list[1][1:]]
self.replace(command_list[1][1:], command_list[1])
msg += command_list[1][1:] + " "
for x in xrange(2, len(command_list)):
msg += "%s " % command_list[x]
self.settings.aliases[command_list[1]].append(command_list[x])
                        # replace each word with its alias
self.replace(command_list[x], command_list[1])
                    msg += "have been aliased to %s" % command_list[1]
# Quit
elif command_list[0] == "!quit":
# Close the dictionary
self.save_all()
sys.exit()
# Save changes
self.settings.save()
if msg != "":
io_module.output(msg, args)
def replace(self, old, new):
"""
        Replace all occurrences of 'old' in the dictionary with
'new'. Nice for fixing learnt typos.
"""
try:
pointers = self.words[old]
except KeyError, e:
return old+" not known."
changed = 0
for x in pointers:
# pointers consist of (line, word) to self.lines
l, w = struct.unpack("iH", x)
line = self.lines[l][0].split()
number = self.lines[l][1]
if line[w] != old:
# fucked dictionary
print "Broken link: %s %s" % (x, self.lines[l][0] )
continue
else:
line[w] = new
self.lines[l][0] = " ".join(line)
self.lines[l][1] += number
changed += 1
if self.words.has_key(new):
self.settings.num_words -= 1
self.words[new].extend(self.words[old])
else:
self.words[new] = self.words[old]
del self.words[old]
return "%d instances of %s replaced with %s" % ( changed, old, new )
<|fim▁hole|> """
Unlearn all contexts containing 'context'. If 'context'
is a single word then all contexts containing that word
will be removed, just like the old !unlearn <word>
"""
# Pad thing to look for
# We pad so we don't match 'shit' when searching for 'hit', etc.
context = " "+context+" "
# Search through contexts
# count deleted items
dellist = []
# words that will have broken context due to this
wordlist = []
for x in self.lines.keys():
# get context. pad
c = " "+self.lines[x][0]+" "
if c.find(context) != -1:
# Split line up
wlist = self.lines[x][0].split()
# add touched words to list
for w in wlist:
if not w in wordlist:
wordlist.append(w)
dellist.append(x)
del self.lines[x]
words = self.words
unpack = struct.unpack
# update links
for x in wordlist:
word_contexts = words[x]
# Check all the word's links (backwards so we can delete)
for y in xrange(len(word_contexts)-1, -1, -1):
# Check for any of the deleted contexts
if unpack("iH", word_contexts[y])[0] in dellist:
del word_contexts[y]
self.settings.num_contexts = self.settings.num_contexts - 1
if len(words[x]) == 0:
del words[x]
self.settings.num_words = self.settings.num_words - 1
print "\"%s\" vaped totally" %x
def reply(self, body):
"""
Reply to a line of text.
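
        Strategy: pick the rarest known word of the input as a seed, then grow
        the sentence backwards and then forwards from it, choosing neighbouring
        words weighted by how often they appear next to it in the learned contexts.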
"""
# split sentences into list of words
_words = body.split(" ")
words = []
for i in _words:
words += i.split()
del _words
if len(words) == 0:
return ""
#remove words on the ignore list
#words = filter((lambda x: x not in self.settings.ignore_list and not x.isdigit() ), words)
words = [x for x in words if x not in self.settings.ignore_list and not x.isdigit()]
# Find rarest word (excluding those unknown)
index = []
known = -1
        # The word has to have been seen in at least 3 different contexts to be chosen
known_min = 3
for x in xrange(0, len(words)):
if self.words.has_key(words[x]):
k = len(self.words[words[x]])
else:
continue
if (known == -1 or k < known) and k > known_min:
index = [words[x]]
known = k
continue
elif k == known:
index.append(words[x])
continue
# Index now contains list of rarest known words in sentence
if len(index)==0:
return ""
word = index[randint(0, len(index)-1)]
# Build sentence backwards from "chosen" word
sentence = [word]
done = 0
while done == 0:
            # create a dictionary which will contain all the words we can find before the "chosen" word
pre_words = {"" : 0}
            # this is to prevent the case where we have an ignore-listed word
word = str(sentence[0].split(" ")[0])
for x in xrange(0, len(self.words[word]) -1 ):
l, w = struct.unpack("iH", self.words[word][x])
context = self.lines[l][0]
num_context = self.lines[l][1]
cwords = context.split()
                # if the word is not the first of the context, look at the previous one
if cwords[w] != word:
print context
if w:
                    # see if we can find a pair with the chosen word and the previous one
if len(sentence) > 1 and len(cwords) > w+1:
if sentence[1] != cwords[w+1]:
continue
                    # if the word is in ignore_list, look at the previous word
look_for = cwords[w-1]
if look_for in self.settings.ignore_list and w > 1:
look_for = cwords[w-2]+" "+look_for
                    # save how many times we found each word
if not(pre_words.has_key(look_for)):
pre_words[look_for] = num_context
else :
pre_words[look_for] += num_context
else:
pre_words[""] += num_context
#Sort the words
liste = pre_words.items()
liste.sort(lambda x,y: cmp(y[1],x[1]))
numbers = [liste[0][1]]
for x in xrange(1, len(liste) ):
numbers.append(liste[x][1] + numbers[x-1])
            # take one of them from the list (randomly, weighted by frequency)
mot = randint(0, numbers[len(numbers) -1])
for x in xrange(0, len(numbers)):
if mot <= numbers[x]:
mot = liste[x][0]
break
            # if the word is already chosen, pick the next one
while mot in sentence:
x += 1
if x >= len(liste) -1:
mot = ''
mot = liste[x][0]
mot = mot.split(" ")
mot.reverse()
if mot == ['']:
done = 1
else:
map( (lambda x: sentence.insert(0, x) ), mot )
pre_words = sentence
sentence = sentence[-2:]
# Now build sentence forwards from "chosen" word
#We've got
#cwords: ... cwords[w-1] cwords[w] cwords[w+1] cwords[w+2]
#sentence: ... sentence[-2] sentence[-1] look_for look_for ?
        # given a known cwords[w], and maybe a known cwords[w-1], we are looking for which cwords[w+1] to choose.
        # cwords[w+2] is needed when cwords[w+1] is in the ignore list
done = 0
while done == 0:
            # create a dictionary which will contain all the words we can find after the "chosen" word
post_words = {"" : 0}
word = str(sentence[-1].split(" ")[-1])
for x in xrange(0, len(self.words[word]) ):
l, w = struct.unpack("iH", self.words[word][x])
context = self.lines[l][0]
num_context = self.lines[l][1]
cwords = context.split()
                # see if we can find a pair with the chosen word and the previous one
if len(sentence) > 1:
if sentence[len(sentence)-2] != cwords[w-1]:
continue
if w < len(cwords)-1:
                    # if the word is in ignore_list, look at the next word
look_for = cwords[w+1]
if look_for in self.settings.ignore_list and w < len(cwords) -2:
look_for = look_for+" "+cwords[w+2]
if not(post_words.has_key(look_for)):
post_words[look_for] = num_context
else :
post_words[look_for] += num_context
else:
post_words[""] += num_context
#Sort the words
liste = post_words.items()
liste.sort(lambda x,y: cmp(y[1],x[1]))
numbers = [liste[0][1]]
for x in xrange(1, len(liste) ):
numbers.append(liste[x][1] + numbers[x-1])
            # take one of them from the list (randomly, weighted by frequency)
mot = randint(0, numbers[len(numbers) -1])
for x in xrange(0, len(numbers)):
if mot <= numbers[x]:
mot = liste[x][0]
break
x = -1
while mot in sentence:
x += 1
if x >= len(liste) -1:
mot = ''
break
mot = liste[x][0]
mot = mot.split(" ")
if mot == ['']:
done = 1
else:
map( (lambda x: sentence.append(x) ), mot )
sentence = pre_words[:-2] + sentence
#Replace aliases
for x in xrange(0, len(sentence)):
if sentence[x][0] == "~": sentence[x] = sentence[x][1:]
        # Insert a space between each word
map( (lambda x: sentence.insert(1+x*2, " ") ), xrange(0, len(sentence)-1) )
        # correct the spacing problem around ' and ,
        # code is not very good and could be improved, but it does the job...
for x in xrange(0, len(sentence)):
if sentence[x] == "'":
sentence[x-1] = ""
sentence[x+1] = ""
if sentence[x] == ",":
sentence[x-1] = ""
#return as string..
return "".join(sentence)
def learn(self, body, num_context=1):
"""
Lines should be cleaned (filter_message()) before passing
to this.
"""
def learn_line(self, body, num_context):
"""
Learn from a sentence.
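
            Each cleaned line is stored once in self.lines, keyed by a hash of its
            text; self.words maps every word to packed (line hash, word position)
            pointers into those lines.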
"""
import re
words = body.split()
# Ignore sentences of < 1 words XXX was <3
if len(words) < 1:
return
voyelles = "aà âeéèêiîïoöôuüûy"
for x in xrange(0, len(words)):
nb_voy = 0
digit = 0
char = 0
for c in words[x]:
if c in voyelles:
nb_voy += 1
if c.isalpha():
char += 1
if c.isdigit():
digit += 1
for censored in self.settings.censored:
pattern = "^%s$" % censored
if re.search(pattern, words[x]):
print "Censored word %s" %words[x]
return
if len(words[x]) > 13 \
or ( ((nb_voy*100) / len(words[x]) < 26) and len(words[x]) > 5 ) \
or ( char and digit ) \
or ( self.words.has_key(words[x]) == 0 and self.settings.learning == 0 ):
                    # if one word has more than 13 characters, don't learn
                    # (in French, this represents 12% of words)
                    # and don't learn words where less than 25% of the letters are vowels
                    # don't learn the sentence if one word is censored
                    # also don't learn if a word mixes digits and letters
                    # same if learning is off
return
elif ( "-" in words[x] or "_" in words[x] ) :
words[x]="#nick"
num_w = self.settings.num_words
if num_w != 0:
num_cpw = self.settings.num_contexts/float(num_w) # contexts per word
else:
num_cpw = 0
cleanbody = " ".join(words)
# Hash collisions we don't care about. 2^32 is big :-)
hashval = ctypes.c_int32(hash(cleanbody)).value
# Check context isn't already known
if not self.lines.has_key(hashval):
if not(num_cpw > 100 and self.settings.learning == 0):
self.lines[hashval] = [cleanbody, num_context]
# Add link for each word
for x in xrange(0, len(words)):
if self.words.has_key(words[x]):
# Add entry. (line number, word number)
self.words[words[x]].append(struct.pack("iH", hashval, x))
else:
self.words[words[x]] = [ struct.pack("iH", hashval, x) ]
self.settings.num_words += 1
self.settings.num_contexts += 1
else :
self.lines[hashval][1] += num_context
            # if max_words is reached, don't learn any more
if self.settings.num_words >= self.settings.max_words: self.settings.learning = 0
# Split body text into sentences and parse them
# one by one.
body += " "
map ( (lambda x : learn_line(self, x, num_context)), body.split(". "))<|fim▁end|> | def unlearn(self, context): |
<|file_name|>latexexport.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# import the main window object (mw) from ankiqt
from aqt import mw
# import the "show info" tool from utils.py
from aqt.utils import showInfo<|fim▁hole|>from anki.exporting import *
from anki.hooks import addHook
class LatexNoteExporter(Exporter):
key = _("Notes in Latex")
ext = ".tex"
def __init__(self, col):
Exporter.__init__(self, col)
self.includeID = True
self.includeTags = True
def replaceLineBreaks(self,text):
"Replace html-line breaks by plain-text line breaks"
        # remove plain-text line breaks (most probably there aren't any to begin with)
text = text.replace("\n","")
#convert some html
htmldict = {r"<br>":"\n",
r"<br />":"\n",
r"<div>":"\n",
r"</div>":"",
r" ":r" "}
for k, v in htmldict.items():
text = text.replace(k, v)
return text
def stripNewLines(self,text):
"Remove newlines at beginning and end of text, and replace double blank lines by single blank lines"
text = re.sub("\n\s*\n+","\n\n",text).strip()
        # the following is superfluous as it's done automatically by strip()
#while len(text) > 0 and text[1] == "\n":
# text = text[1:].strip()
#while len(text) > 0 and text[-1] == "\n":
# text = text[:-1].strip()
return text
def htmlToLatex(self, text):
"Remove [latex], [/latex] and html"
#convert some html
htmldict = {r"&":r"&",
r"<":r"<",
r">":r">"}
for k, v in htmldict.items():
text = text.replace(k, v)
#remove all remaining html
text = re.sub("<[^<]+?>", "", text)
#remove latex marks and any surrounding line breaks
text = re.sub("\n*\[latex\]","",text)
text = re.sub("\[/latex\]\n*","",text)
return text
def doExport(self, file):
cardIds = self.cardIds()
data = []
model = mw.col.getCard(cardIds[0]).model()
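        # note: the LaTeX pre/postamble is taken from the first selected card's model
        # and applied to the whole export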
for id, flds, tags in self.col.db.execute("""
select guid, flds, tags from notes
where id in
(select nid from cards
where cards.id in %s)""" % ids2str(cardIds)):
latexnote = []
TAB = " "
latexnote.append(r"\begin{note}")
# fields
for f in splitFields(flds):
newf = self.replaceLineBreaks(f)
if newf.find("[latex]") != -1:
#treat as latex field
newf = self.htmlToLatex(newf)
if newf.find("\n") == -1:
#field consists of a single line
latexnote.append(TAB + r"\xfield{" + newf + "}")
else:
newf = self.stripNewLines(newf)
newf = TAB + TAB + newf.replace("\n","\n" + TAB + TAB)
latexnote.append(TAB + r"\begin{field}" + "\n" + newf + "\n" + TAB + r"\end{field}")
else:
#treat as plain-text field
if newf.find("\n") == -1:
#field consists of a single line
latexnote.append(TAB + r"\xplain{" + newf + "}")
else:
newf = self.stripNewLines(newf)
newf = TAB + TAB + newf.replace("\n","\n" + TAB + TAB)
latexnote.append(TAB + r"\begin{plain}" + "\n" + newf + "\n" + TAB + r"\end{plain}")
#remove empty fields at the end of the note:
while latexnote[-1] == TAB + r"\xplain{}":
latexnote.pop()
# tags
if self.includeTags:
cleantag = tags.strip()
if cleantag != "":
latexnote.append(TAB + r"\tags{" + tags.strip() + r"}")
latexnote.append(r"\end{note}" + "\n")
data.append("\n".join(latexnote))
self.count = len(data)
#preamble =r"""# -- I've decided that this should be placed in model["latexPre"] by the user
#\newenvironment{note}{}{\begin{center}\rule{\textwidth}{2pt}\end{center}}
#\newenvironment{field}{}{\begin{center}\rule{\textwidth}{0.4pt}\end{center}}
#\newcommand*{\tags}[1]{\paragraph{tags: }#1}"""
out = "% -*- coding: utf-8 -*-\n" + model["latexPre"] + "\n" + "\n".join(data) + "\n" + model["latexPost"]
file.write(out.encode("utf-8"))
def addLatexExporterToList(exps):
exps.append((LatexNoteExporter.key + " (*" + LatexNoteExporter.ext + r")", LatexNoteExporter))
addHook("exportersList", addLatexExporterToList);<|fim▁end|> | # import all of the Qt GUI library
from aqt.qt import * |
<|file_name|>frmSelec.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:/Users/Gaspar/.qgis/python/plugins/delPropiedad/forms_ui/frmSelec.ui'
#
# Created: Wed Jul 18 12:50:20 2012
# by: PyQt4 UI code generator 4.8.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_frmSelec(object):
def setupUi(self, frmSelec):
frmSelec.setObjectName(_fromUtf8("frmSelec"))
frmSelec.resize(972, 310)
frmSelec.setWindowTitle(QtGui.QApplication.translate("frmSelec", "Seleccionar trabajo", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget = QtGui.QTableWidget(frmSelec)
self.tableWidget.setGeometry(QtCore.QRect(10, 30, 951, 231))
self.tableWidget.setToolTip(QtGui.QApplication.translate("frmSelec", "Seleccione una fila y pulse aceptar", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.tableWidget.setObjectName(_fromUtf8("tableWidget"))
self.tableWidget.setColumnCount(0)
<|fim▁hole|> self.bttAceptar = QtGui.QPushButton(frmSelec)
self.bttAceptar.setGeometry(QtCore.QRect(440, 270, 111, 31))
self.bttAceptar.setText(QtGui.QApplication.translate("frmSelec", "Aceptar", None, QtGui.QApplication.UnicodeUTF8))
self.bttAceptar.setObjectName(_fromUtf8("bttAceptar"))
self.bttCancelar = QtGui.QPushButton(frmSelec)
self.bttCancelar.setGeometry(QtCore.QRect(570, 270, 91, 31))
self.bttCancelar.setText(QtGui.QApplication.translate("frmSelec", "Cancelar", None, QtGui.QApplication.UnicodeUTF8))
self.bttCancelar.setObjectName(_fromUtf8("bttCancelar"))
self.label = QtGui.QLabel(frmSelec)
self.label.setGeometry(QtCore.QRect(20, 10, 331, 16))
self.label.setText(QtGui.QApplication.translate("frmSelec", "Selecciones el trabajo que desea consultar:", None, QtGui.QApplication.UnicodeUTF8))
self.label.setObjectName(_fromUtf8("label"))
self.retranslateUi(frmSelec)
QtCore.QMetaObject.connectSlotsByName(frmSelec)
def retranslateUi(self, frmSelec):
pass
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
frmSelec = QtGui.QDialog()
ui = Ui_frmSelec()
ui.setupUi(frmSelec)
frmSelec.show()
sys.exit(app.exec_())<|fim▁end|> | self.tableWidget.setRowCount(0)
|
<|file_name|>example-oauthenticator.py<|end_file_name|><|fim▁begin|>"""
Example OAuthenticator to use with My Service
"""
import json
from jupyterhub.auth import LocalAuthenticator
from oauthenticator.oauth2 import OAuthLoginHandler, OAuthenticator
from tornado.auth import OAuth2Mixin
from tornado.httputil import url_concat
from tornado.httpclient import HTTPRequest, AsyncHTTPClient, HTTPError
class MyServiceMixin(OAuth2Mixin):
# authorize is the URL users are redirected to to authorize your service
_OAUTH_AUTHORIZE_URL = "https://myservice.biz/login/oauth/authorize"
# token is the URL JupyterHub accesses to finish the OAuth process
_OAUTH_ACCESS_TOKEN_URL = "https://myservice.biz/login/oauth/access_token"
class MyServiceLoginHandler(OAuthLoginHandler, MyServiceMixin):
pass
class MyServiceOAuthenticator(OAuthenticator):
# login_service is the text displayed on the "Login with..." button
login_service = "My Service"
login_handler = MyServiceLoginHandler
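    # client_id, client_secret and the OAuth callback URL are configurable traits
    # inherited from OAuthenticator (typically set in jupyterhub_config.py)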
    async def authenticate(self, handler, data=None):
        """We set up auth_state based on additional My Service info if we
        receive it.
        """
code = handler.get_argument("code")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
# Exchange the OAuth code for an Access Token
# this is the TOKEN URL in your provider
params = dict(
client_id=self.client_id, client_secret=self.client_secret, code=code
)
url = url_concat("https://myservice.biz/login/oauth/access_token", params)
req = HTTPRequest(
url, method="POST", headers={"Accept": "application/json"}, body=''
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
if 'access_token' in resp_json:
access_token = resp_json['access_token']
elif 'error_description' in resp_json:
raise HTTPError(
403,
"An access token was not returned: {}".format(
resp_json['error_description']
),
)
else:
            raise HTTPError(500, "Bad response: {}".format(resp))
# Determine who the logged in user is
# by using the new access token to make a request
# check with your OAuth provider for this URL.<|fim▁hole|>
req = HTTPRequest(
"https://myservice.biz/api/user",
method="GET",
headers={"Authorization": f"Bearer {access_token}"},
)
resp = await http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
# check the documentation for what field contains a unique username
# it might not be the 'username'!
username = resp_json["username"]
if not username:
# return None means that no user is authenticated
# and login has failed
return None
# here we can add additional checks such as against team whitelists
# if the OAuth provider has such a concept
# 'name' is the JupyterHub username
user_info = {"name": username}
# We can also persist auth state,
# which is information encrypted in the Jupyter database
# and can be passed to the Spawner for e.g. authenticated data access
# these fields are up to you, and not interpreted by JupyterHub
# see Authenticator.pre_spawn_start for how to use this information
user_info["auth_state"] = auth_state = {}
auth_state['access_token'] = access_token
auth_state['auth_reply'] = resp_json
return user_info
class LocalGitHubOAuthenticator(LocalAuthenticator, GitHubOAuthenticator):
"""A version that mixes in local system user creation"""
pass<|fim▁end|> | # it could also be in the response to the token request,
# making this request unnecessary. |
<|file_name|>serializers.py<|end_file_name|><|fim▁begin|>from .models import Project,Member,Contact,Technology,Contributor
from rest_framework import serializers
class ContactSerializer(serializers.ModelSerializer):
class Meta:
model = Contact<|fim▁hole|>class MemberSerializer(serializers.ModelSerializer):
contacts = ContactSerializer(many=True)
class Meta:
model = Member
fields = ('name', 'post', 'img', 'contacts')
class ContributorSerializer(serializers.ModelSerializer):
class Meta:
model=Contributor
fields = ('name','github')
class ProjectSerializer(serializers.ModelSerializer):
contributors=ContributorSerializer(many=True)
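    # nested serializer: each project embeds its contributors as objects rather than primary keys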
class Meta:
model = Project
fields = ('slug','name','type','desc','icon','technologies','long_desc','contributors','meta')<|fim▁end|> | fields = ('name', 'link')
|
<|file_name|>region-bounds-on-objects-and-type-parameters.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test related to when a region bound is required to be specified.
trait IsStatic : 'static { }
trait IsSend : Send { }
trait Is<'a> : 'a { }
trait Is2<'a> : 'a { }
trait SomeTrait { }<|fim▁hole|>// Bounds on object types:
struct Foo<'a,'b,'c> {
// All of these are ok, because we can derive exactly one bound:
a: Box<IsStatic>,
b: Box<Is<'static>>,
c: Box<Is<'a>>,
d: Box<IsSend>,
e: Box<Is<'a>+Send>, // we can derive two bounds, but one is 'static, so ok
f: Box<SomeTrait>, // OK, defaults to 'static due to RFC 599.
g: Box<SomeTrait+'a>,
z: Box<Is<'a>+'b+'c>, //~ ERROR only a single explicit lifetime bound is permitted
}
fn test<
'a,
'b,
A:IsStatic,
B:Is<'a>+Is2<'b>, // OK in a parameter, but not an object type.
C:'b+Is<'a>+Is2<'b>,
D:Is<'a>+Is2<'static>,
E:'a+'b // OK in a parameter, but not an object type.
>() { }
fn main() { }<|fim▁end|> | |
<|file_name|>TipoServicio.java<|end_file_name|><|fim▁begin|>/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright 2016 Oracle and/or its affiliates. All rights reserved.
*
* Oracle and Java are registered trademarks of Oracle and/or its affiliates.
* Other names may be trademarks of their respective owners.
*
* The contents of this file are subject to the terms of either the GNU
* General Public License Version 2 only ("GPL") or the Common
* Development and Distribution License("CDDL") (collectively, the
* "License"). You may not use this file except in compliance with the
* License. You can obtain a copy of the License at
* http://www.netbeans.org/cddl-gplv2.html
* or nbbuild/licenses/CDDL-GPL-2-CP. See the License for the
* specific language governing permissions and limitations under the
* License. When distributing the software, include this License Header
* Notice in each file and include the License file at
* nbbuild/licenses/CDDL-GPL-2-CP. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the GPL Version 2 section of the License file that
* accompanied this code. If applicable, add the following below the
* License Header, with the fields enclosed by brackets [] replaced by
* your own identifying information:
* "Portions Copyrighted [year] [name of copyright owner]"
*
* If you wish your version of this file to be governed by only the CDDL
* or only the GPL Version 2, indicate your decision by adding
* "[Contributor] elects to include this software in this distribution
* under the [CDDL or GPL Version 2] license." If you do not indicate a
* single choice of license, a recipient has the option to distribute
* your version of this file under either the CDDL, the GPL Version 2 or
* to extend the choice of license to its licensees as provided above.
* However, if you add GPL Version 2 code and therefore, elected the GPL
* Version 2 license, then the option applies only if the new code is
* made subject to such option by the copyright holder.
*
* Contributor(s):
*
* Portions Copyrighted 2016 Sun Microsystems, Inc.
*/
package beans;
import java.io.Serializable;
import java.util.Collection;
import javax.persistence.Basic;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
import javax.persistence.OneToMany;
import javax.persistence.Table;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;
/**
*
* @author marc.gareta
*/
@Entity
@Table(name = "TIPO_SERVICIO", catalog = "", schema = "APP")
@XmlRootElement
@NamedQueries({
@NamedQuery(name = "TIPO_SERVICIO.findAll", query = "SELECT t FROM TipoServicio t"),
@NamedQuery(name = "TIPO_SERVICIO.findAllNombre", query = "SELECT t.nombre FROM TipoServicio t"),
@NamedQuery(name = "TIPO_SERVICIO.findById", query = "SELECT t FROM TipoServicio t WHERE t.id = :id"),
@NamedQuery(name = "TIPO_SERVICIO.findByNombre", query = "SELECT t FROM TipoServicio t WHERE t.nombre = :nombre"),
@NamedQuery(name = "TIPO_SERVICIO.deleteAll", query = "DELETE FROM TipoServicio t")})
public class TipoServicio implements Serializable {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
<|fim▁hole|> @Basic(optional = false)
@Column(nullable = false)
private Integer id;
@Column(length = 100)
private String nombre;
@OneToMany(mappedBy = "tipoServicio")
private Collection<ParteIncidencia> parteIncidenciaCollection;
public TipoServicio() {
}
public TipoServicio(Integer id) {
this.id = id;
}
public TipoServicio(String nombre) {
this.nombre = nombre;
}
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getNombre() {
return nombre;
}
public void setNombre(String nombre) {
this.nombre = nombre;
}
@XmlTransient
public Collection<ParteIncidencia> getParteIncidenciaCollection() {
return parteIncidenciaCollection;
}
public void setParteIncidenciaCollection(Collection<ParteIncidencia> parteIncidenciaCollection) {
this.parteIncidenciaCollection = parteIncidenciaCollection;
}
@Override
public int hashCode() {
int hash = 0;
hash += (id != null ? id.hashCode() : 0);
return hash;
}
@Override
public boolean equals(Object object) {
// TODO: Warning - this method won't work in the case the id fields are not set
if (!(object instanceof TipoServicio)) {
return false;
}
TipoServicio other = (TipoServicio) object;
if ((this.id == null && other.id != null) || (this.id != null && !this.id.equals(other.id))) {
return false;
}
return true;
}
@Override
public String toString() {
return "beans.TipoServicio[ id=" + id + " ]";
}
}<|fim▁end|> | |
<|file_name|>map-port.ts<|end_file_name|><|fim▁begin|>export class MapPort {<|fim▁hole|> name: string;
portNumber: number;
shortName: string;
}<|fim▁end|> | adapterNumber: number;
linkType: string; |
<|file_name|>bitcoin_it.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="it" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Bitcoin</source>
<translation>Info su BIGBullion</translation>
</message>
<message>
<location line="+39"/>
<source><b>Bitcoin</b> version</source>
<translation>Versione di <b>BIGBullion</b></translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>
Questo è un software sperimentale.
Distribuito sotto la licenza software MIT/X11, vedi il file COPYING incluso oppure su http://www.opensource.org/licenses/mit-license.php.
Questo prodotto include software sviluppato dal progetto OpenSSL per l'uso del Toolkit OpenSSL (http://www.openssl.org/), software crittografico scritto da Eric Young ([email protected]) e software UPnP scritto da Thomas Bernard.</translation>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation>Copyright</translation>
</message>
<message>
<location line="+0"/>
<source>The Bitcoin developers</source>
<translation>Sviluppatori di BIGBullion</translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>Rubrica</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>Fai doppio click per modificare o cancellare l'etichetta</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Crea un nuovo indirizzo</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Copia l'indirizzo attualmente selezionato nella clipboard</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>&Nuovo indirizzo</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your Bitcoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>Questi sono i tuoi indirizzi BIGBullion per ricevere pagamenti. Potrai darne uno diverso ad ognuno per tenere così traccia di chi ti sta pagando.</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation>&Copia l'indirizzo</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>Mostra il codice &QR</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Bitcoin address</source>
<translation>Firma un messaggio per dimostrare di possedere questo indirizzo</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Firma il &messaggio</translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>Cancella l'indirizzo attualmente selezionato dalla lista</translation>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation>Esporta i dati nella tabella corrente su un file</translation>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation>&Esporta...</translation>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified Bitcoin address</source>
<translation>Verifica un messaggio per accertarsi che sia firmato con un indirizzo BIGBullion specifico</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation>&Verifica Messaggio</translation>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Cancella</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your Bitcoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation>These are your BIGBullion addresses for sending payments. Always check the amount and the receiving address before sending coins.</translation>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation>Copia &l'etichetta</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>&Modifica</translation>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation>Invia &BIGBullion</translation>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation>Esporta gli indirizzi della rubrica</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Testo CSV (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>Errore nell'esportazione</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Impossibile scrivere sul file %1.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Etichetta</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Indirizzo</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(nessuna etichetta)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>Finestra passphrase</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Inserisci la passphrase</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Nuova passphrase</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Ripeti la passphrase</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
    <translation>Inserisci la nuova passphrase per il portamonete.<br/>Per piacere usare una passphrase di <b>10 o più caratteri casuali</b>, o <b>otto o più parole</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Cifra il portamonete</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Quest'operazione necessita della passphrase per sbloccare il portamonete.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Sblocca il portamonete</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Quest'operazione necessita della passphrase per decifrare il portamonete,</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Decifra il portamonete</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Cambia la passphrase</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Inserisci la vecchia e la nuova passphrase per il portamonete.</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Conferma la cifratura del portamonete</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR BITCOINS</b>!</source>
<translation>Attenzione: se si cifra il portamonete e si perde la frase d'ordine, <b>SI PERDERANNO TUTTI I PROPRI BIGBULLION</b>!</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>Si è sicuri di voler cifrare il portamonete?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>IMPORTANTE: qualsiasi backup del portafoglio effettuato precedentemente dovrebbe essere sostituito con il file del portafoglio criptato appena generato. Per ragioni di sicurezza, i backup precedenti del file del portafoglio non criptato diventeranno inservibili non appena si inizi ad usare il nuovo portafoglio criptato.</translation>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Attenzione: tasto Blocco maiuscole attivo.</translation>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>Portamonete cifrato</translation>
</message>
<message>
<location line="-56"/>
<source>Bitcoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your bitcoins from being stolen by malware infecting your computer.</source>
<translation>BIGBullion verrà ora chiuso per finire il processo di crittazione. Ricorda che criptare il tuo portamonete non può fornire una protezione totale contro furti causati da malware che dovessero infettare il tuo computer.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Cifratura del portamonete fallita</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Cifratura del portamonete fallita a causa di un errore interno. Il portamonete non è stato cifrato.</translation>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>Le passphrase inserite non corrispondono.</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation>Sblocco del portamonete fallito</translation>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>La passphrase inserita per la decifrazione del portamonete è errata.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Decifrazione del portamonete fallita</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>Passphrase del portamonete modificata con successo.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation>Firma il &messaggio...</translation>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation>Sto sincronizzando con la rete...</translation>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation>&Sintesi</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Mostra lo stato generale del portamonete</translation>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation>&Transazioni</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Cerca nelle transazioni</translation>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation>Modifica la lista degli indirizzi salvati e delle etichette</translation>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation>Mostra la lista di indirizzi su cui ricevere pagamenti</translation>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation>&Esci</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Chiudi applicazione</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about Bitcoin</source>
<translation>Mostra informazioni su BIGBullion</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>Informazioni su &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Mostra informazioni su Qt</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Opzioni...</translation>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation>&Cifra il portamonete...</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>&Backup Portamonete...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&Cambia la passphrase...</translation>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
<translation>Importa blocchi dal disco...</translation>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation>Re-indicizzazione blocchi su disco...</translation>
</message>
<message>
<location line="-347"/>
<source>Send coins to a Bitcoin address</source>
<translation>Invia monete ad un indirizzo bigbullion</translation>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for Bitcoin</source>
    <translation>Modifica le opzioni di configurazione di BIGBullion</translation>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation>Backup portamonete in un'altra locazione</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Cambia la passphrase per la cifratura del portamonete</translation>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation>Finestra &Debug</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>Apri la console di degugging e diagnostica</translation>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation>&Verifica messaggio...</translation>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>Bitcoin</source>
<translation>BIGBullion</translation>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation>Portamonete</translation>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation>&Spedisci</translation>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation>&Ricevi</translation>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation>&Indirizzi</translation>
</message>
<message>
<location line="+22"/>
<source>&About Bitcoin</source>
<translation>&Info su BIGBullion</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&Mostra/Nascondi</translation>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation>Mostra o nascondi la Finestra principale</translation>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation>Crittografa le chiavi private che appartengono al tuo portafoglio</translation>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your Bitcoin addresses to prove you own them</source>
<translation>Firma i messaggi con il tuo indirizzo BIGBullion per dimostrare di possederli</translation>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified Bitcoin addresses</source>
<translation>Verifica i messaggi per accertarsi che siano stati firmati con gli indirizzi BIGBullion specificati</translation>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>&File</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>&Impostazioni</translation>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation>&Aiuto</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>Barra degli strumenti "Tabs"</translation>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+47"/>
<source>Bitcoin client</source>
<translation>BIGBullion client</translation>
</message>
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to Bitcoin network</source>
<translation><numerusform>%n connessione attiva alla rete BIGBullion</numerusform><numerusform>%n connessioni attive alla rete BIGBullion</numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation>Processati %1 di %2 (circa) blocchi della cronologia transazioni.</translation>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation>Processati %1 blocchi della cronologia transazioni.</translation>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation><numerusform>%n ora</numerusform><numerusform>%n ore</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation><numerusform>%n giorno</numerusform><numerusform>%n giorni</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation><numerusform>%n settimana</numerusform><numerusform>%n settimane</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation>L'ultimo blocco ricevuto è stato generato %1 fa.</translation>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation>Errore</translation>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation>Attenzione</translation>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation>Informazione</translation>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation>Questa transazione è superiore al limite di dimensione. È comunque possibile inviarla con una commissione di %1, che va ai nodi che processano la tua transazione e contribuisce a sostenere la rete. Vuoi pagare la commissione?</translation>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation>Aggiornato</translation>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation>In aggiornamento...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation>Conferma compenso transazione</translation>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation>Transazione inviata</translation>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation>Transazione ricevuta</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Data: %1
Quantità: %2
Tipo: %3
Indirizzo: %4
</translation>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation>Gestione URI</translation>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid Bitcoin address or malformed URI parameters.</source>
<translation>Impossibile interpretare l'URI! Ciò può essere causato da un indirizzo BIGBullion invalido o da parametri URI non corretti.</translation>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Il portamonete è <b>cifrato</b> e attualmente <b>sbloccato</b></translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Il portamonete è <b>cifrato</b> e attualmente <b>bloccato</b></translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. Bitcoin can no longer continue safely and will quit.</source>
<translation>Riscontrato un errore irreversibile. BIGBullion non può più continuare in sicurezza e verrà terminato.</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation>Avviso di rete</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Modifica l'indirizzo</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Etichetta</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>L'etichetta associata a questo indirizzo nella rubrica</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Indirizzo</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>L'indirizzo associato a questa voce della rubrica. Si può modificare solo negli indirizzi di spedizione.</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>Nuovo indirizzo di ricezione</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Nuovo indirizzo d'invio</translation><|fim▁hole|> <source>Edit receiving address</source>
<translation>Modifica indirizzo di ricezione</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Modifica indirizzo d'invio</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>L'indirizzo inserito "%1" è già in rubrica.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Bitcoin address.</source>
<translation>L'indirizzo inserito "%1" non è un indirizzo bigbullion valido.</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Impossibile sbloccare il portamonete.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Generazione della nuova chiave non riuscita.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>Bitcoin-Qt</source>
<translation>BIGBullion-Qt</translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>versione</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>Utilizzo:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation>opzioni riga di comando</translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation>UI opzioni</translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>Imposta lingua, ad esempio "it_IT" (predefinita: lingua di sistema)</translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
    <translation>Parti in icona</translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation>Mostra finestra di presentazione all'avvio (default: 1)</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Opzioni</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>&Principale</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Paga la &commissione</translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start Bitcoin after logging in to the system.</source>
<translation>Avvia automaticamente BIGBullion all'accensione del computer</translation>
</message>
<message>
<location line="+3"/>
<source>&Start Bitcoin on system login</source>
<translation>&Fai partire BIGBullion all'avvio del sistema</translation>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation>Ripristina tutte le opzioni del client alle predefinite.</translation>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation>&Ripristina Opzioni</translation>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation>Rete</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Bitcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>Apri automaticamente la porta del client BIGBullion sul router. Questo funziona solo se il router supporta UPnP ed è abilitato.</translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Mappa le porte tramite l'&UPnP</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the Bitcoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
    <translation>Connettiti alla rete BIGBullion attraverso un proxy SOCKS (ad esempio quando ci si collega via Tor).</translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation>&Collegati tramite SOCKS proxy:</translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>&IP del proxy:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation>Indirizzo IP del proxy (ad esempio 127.0.0.1)</translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&Porta:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Porta del proxy (es. 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>SOCKS &Version:</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>Versione SOCKS del proxy (es. 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>&Finestra</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Mostra solo un'icona nel tray quando si minimizza la finestra</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&Minimizza sul tray invece che sulla barra delle applicazioni</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Riduci ad icona, invece di uscire dall'applicazione quando la finestra viene chiusa. Quando questa opzione è attivata, l'applicazione verrà chiusa solo dopo aver selezionato Esci nel menu.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>M&inimizza alla chiusura</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&Mostra</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>&Lingua Interfaccia Utente:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Bitcoin.</source>
<translation>La lingua dell'interfaccia utente può essere impostata qui. L'impostazione avrà effetto dopo il riavvio di BIGBullion.</translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>&Unità di misura degli importi in:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Scegli l'unità di suddivisione di default per l'interfaccia e per l'invio di monete</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show Bitcoin addresses in the transaction list or not.</source>
<translation>Se mostrare l'indirizzo BIGBullion nella transazione o meno.</translation>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>&Mostra gli indirizzi nella lista delle transazioni</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&OK</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&Cancella</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>&Applica</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation>default</translation>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation>Conferma ripristino opzioni</translation>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
    <translation>Alcune impostazioni potrebbero richiedere il riavvio del client per avere effetto.</translation>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation>Vuoi procedere?</translation>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation>Attenzione</translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Bitcoin.</source>
<translation>L'impostazione avrà effetto dopo il riavvio di BIGBullion.</translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>L'indirizzo proxy che hai fornito è invalido.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Modulo</translation>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Bitcoin network after a connection is established, but this process has not completed yet.</source>
    <translation>Le informazioni visualizzate potrebbero non essere aggiornate. Il tuo portafogli verrà sincronizzato automaticamente con la rete BIGBullion dopo che la connessione è stabilita, ma questo processo non è ancora stato completato.</translation>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
    <translation>Saldo:</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>Non confermato:</translation>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation>Portamonete</translation>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation>Immaturo:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>Importo scavato che non è ancora maturato</translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Transazioni recenti</b></translation>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation>Saldo attuale</translation>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>Totale delle transazioni in corso di conferma, che non sono ancora incluse nel saldo attuale</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>fuori sincrono</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start bitcoin: click-to-pay handler</source>
<translation>Cannot start BIGBullion: click-to-pay handler</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation>Codice QR di dialogo</translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>Richiedi pagamento</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>Importo:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>Etichetta:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>Messaggio:</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&Salva come...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation>Errore nella codifica URI nel codice QR</translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation>L'importo specificato non è valido, prego verificare.</translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>L'URI risulta troppo lungo, prova a ridurre il testo nell'etichetta / messaggio.</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation>Salva codice QR</translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>Immagini PNG (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>Nome del client</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation>N/D</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>Versione client</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&Informazione</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Versione OpenSSL in uso</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>Tempo di avvio</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Rete</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Numero connessioni</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation>Nel testnet</translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>Block chain</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Numero attuale di blocchi</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>Numero totale stimato di blocchi</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
    <translation>Ora del blocco più recente</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&Apri</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation>opzioni riga di comando</translation>
</message>
<message>
<location line="+7"/>
<source>Show the Bitcoin-Qt help message to get a list with possible Bitcoin command-line options.</source>
<translation>Mostra il messaggio di aiuto di BIGBullion-QT per avere la lista di tutte le opzioni della riga di comando di BIGBullion.</translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation>&Mostra</translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&Console</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>Data di creazione</translation>
</message>
<message>
<location line="-104"/>
<source>Bitcoin - Debug window</source>
<translation>BIGBullion - Finestra debug</translation>
</message>
<message>
<location line="+25"/>
<source>Bitcoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>File log del Debug</translation>
</message>
<message>
<location line="+7"/>
<source>Open the Bitcoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>Apri il file di log del debug di BIGBullion dalla cartella attuale. Può richiedere alcuni secondi per file di log grandi.</translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Svuota console</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the Bitcoin RPC console.</source>
<translation>Benvenuto nella console RPC di BIGBullion</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
    <translation>Usa le frecce su e giù per navigare nella cronologia e <b>Ctrl-L</b> per pulire lo schermo.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Scrivi <b>help</b> per un riassunto dei comandi disponibili</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Spedisci BIGBullion</translation>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation>Spedisci a diversi beneficiari in una volta sola</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>&Aggiungi beneficiario</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>Rimuovi tutti i campi della transazione</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>Cancella &tutto</translation>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation>Saldo:</translation>
</message>
<message>
<location line="+10"/>
<source>123.456 BIG</source>
<translation>123,456 BIG</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>Conferma la spedizione</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>&Spedisci</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> to %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Conferma la spedizione di bigbullion</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>Si è sicuri di voler spedire %1?</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> e </translation>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>L'indirizzo del beneficiario non è valido, per cortesia controlla.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>L'importo da pagare dev'essere maggiore di 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>L'importo è superiore al saldo attuale</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>Il totale è superiore al saldo attuale includendo la commissione %1.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Trovato un indirizzo doppio, si può spedire solo una volta a ciascun indirizzo in una singola operazione.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation>Errore: Creazione transazione fallita!</translation>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Errore: la transazione è stata rifiutata. Ciò accade se alcuni bigbullion nel portamonete sono stati già spesi, ad esempio se è stata usata una copia del file wallet.dat e i bigbullion sono stati spesi dalla copia ma non segnati come spesi qui.</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>Modulo</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>&Importo:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>Paga &a:</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation>L'indirizzo del beneficiario a cui inviare il pagamento (ad esempio 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Inserisci un'etichetta per questo indirizzo, per aggiungerlo nella rubrica</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&Etichetta</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation>Scegli l'indirizzo dalla rubrica</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Incollare l'indirizzo dagli appunti</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>Rimuovere questo beneficiario</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Bitcoin address (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation>Inserisci un indirizzo BIGBullion (ad esempio 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>Firme - Firma / Verifica un messaggio</translation>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation>&Firma il messaggio</translation>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
    <translation>Puoi firmare messaggi con i tuoi indirizzi per dimostrare che sono tuoi. Fai attenzione a non firmare niente di vago, visto che gli attacchi di phishing potrebbero cercare di spingerti a mettere la tua firma su di loro. Firma solo dichiarazioni completamente dettagliate con cui sei d'accordo.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
    <translation>L'indirizzo con cui firmare il messaggio (ad esempio 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation>Scegli l'indirizzo dalla rubrica</translation>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation>Incollare l'indirizzo dagli appunti</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Inserisci qui il messaggio che vuoi firmare</translation>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation>Firma</translation>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Copia la firma corrente nella clipboard</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Bitcoin address</source>
<translation>Firma un messaggio per dimostrare di possedere questo indirizzo</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Firma &messaggio</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation>Reimposta tutti i campi della firma</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>Cancella &tutto</translation>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation>&Verifica Messaggio</translation>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
    <translation>L'indirizzo con cui è stato firmato il messaggio (ad esempio 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Bitcoin address</source>
<translation>Verifica il messaggio per assicurarsi che sia stato firmato con l'indirizzo BIGBullion specificato</translation>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation>&Verifica Messaggio</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation>Reimposta tutti i campi della verifica messaggio</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Bitcoin address (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source>
<translation>Inserisci un indirizzo BIGBullion (ad esempio 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>Clicca "Firma messaggio" per generare la firma</translation>
</message>
<message>
<location line="+3"/>
<source>Enter Bitcoin signature</source>
<translation>Inserisci firma BIGBullion</translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>L'indirizzo inserito non è valido.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>Per favore controlla l'indirizzo e prova ancora.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>L'indirizzo bigbullion inserito non è associato a nessuna chiave.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Sblocco del portafoglio annullato.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>La chiave privata per l'indirizzo inserito non è disponibile.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>Firma messaggio fallita.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Messaggio firmato.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>Non è stato possibile decodificare la firma.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>Per favore controlla la firma e prova ancora.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>La firma non corrisponde al digest del messaggio.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>Verifica messaggio fallita.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>Messaggio verificato.</translation>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The Bitcoin developers</source>
<translation>Sviluppatori di BIGBullion</translation>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation>Aperto fino a %1</translation>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation>%1/offline</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/non confermato</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 conferme</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>Stato</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, trasmesso attraverso %n nodo</numerusform><numerusform>, trasmesso attraverso %n nodi</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Data</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Sorgente</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Generato</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>Da</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>A</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>proprio indirizzo</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>etichetta</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Credito</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>matura in %n ulteriore blocco</numerusform><numerusform>matura in altri %n blocchi</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>non accettate</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Debito</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Commissione di transazione</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>Importo netto</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Messaggio</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Commento</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>ID della transazione</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>Bisogna attendere 120 blocchi prima di poter spendere i bigbullion generati. Quando è stato generato, questo blocco è stato trasmesso alla rete per essere aggiunto alla catena di blocchi. Se non riesce a entrare nella catena, il suo stato diventerà "non accettato" e non sarà spendibile. Questo può accadere occasionalmente se un altro nodo genera un blocco a pochi secondi di distanza dal tuo.</translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>Informazione di debug</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Transazione</translation>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation>Input</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Importo</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>vero</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>falso</translation>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>, non è stato ancora trasmesso con successo</translation>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Aperto per %n altro blocco</numerusform><numerusform>Aperto per altri %n blocchi</numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation>sconosciuto</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Dettagli sulla transazione</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Questo pannello mostra una descrizione dettagliata della transazione</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation>Data</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Tipo</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Indirizzo</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Importo</translation>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Aperto per %n altro blocco</numerusform><numerusform>Aperto per altri %n blocchi</numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation>Aperto fino a %1</translation>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation>Offline (%1 conferme)</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation>Non confermati (%1 su %2 conferme)</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Confermato (%1 conferme)</translation>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation><numerusform>Il saldo generato sarà disponibile quando maturerà in %n altro blocco</numerusform><numerusform>Il saldo generato sarà disponibile quando maturerà in altri %n blocchi</numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Questo blocco non è stato ricevuto da altri nodi e probabilmente non sarà accettato!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Generati, ma non accettati</translation>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation>Ricevuto tramite</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Ricevuto da</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Spedito a</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Pagamento a te stesso</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Ottenuto dal mining</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(n/a)</translation>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Stato della transazione. Passare con il mouse su questo campo per vedere il numero di conferme.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Data e ora in cui la transazione è stata ricevuta.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Tipo di transazione.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>Indirizzo di destinazione della transazione.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Importo rimosso o aggiunto al saldo.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation>Tutti</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>Oggi</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Questa settimana</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Questo mese</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Il mese scorso</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Quest'anno</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Intervallo...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Ricevuto tramite</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Spedito a</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>A te</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Ottenuto dal mining</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Altro</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Inserisci un indirizzo o un'etichetta da cercare</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Importo minimo</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Copia l'indirizzo</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Copia l'etichetta</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Copia l'importo</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Modifica l'etichetta</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>Mostra i dettagli della transazione</translation>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation>Esporta i dati della transazione</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Testo CSV (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Confermato</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Data</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Tipo</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Etichetta</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Indirizzo</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Importo</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>Errore nell'esportazione</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Impossibile scrivere sul file %1.</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Intervallo:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>a</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation>Spedisci BIGBullion</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation>Esporta i dati della scheda corrente su un file</translation>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation>Backup fallito</translation>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation>Backup eseguito con successo</translation>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation>Il portafoglio è stato correttamente salvato nella nuova cartella.</translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>Bitcoin version</source>
<translation>Versione di BIGBullion</translation>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation>Utilizzo:</translation>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or bitcoind</source>
<translation>Manda il comando a -server o bitcoind
</translation>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation>Lista comandi
</translation>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation>Aiuto su un comando
</translation>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation>Opzioni:
</translation>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: bitcoin.conf)</source>
<translation>Specifica il file di configurazione (di default: bitcoin.conf)
</translation>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: bitcoind.pid)</source>
<translation>Specifica il file pid (default: bitcoind.pid)
</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Specifica la cartella dati
</translation>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Imposta la dimensione cache del database in megabyte (default: 25)</translation>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 11055 or testnet: 5744)</source>
<translation>Ascolta le connessioni su <porta> (default: 11055 o testnet: 5744)</translation>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Mantieni al massimo <n> connessioni ai peer (default: 125)</translation>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Connessione ad un nodo per ricevere l'indirizzo del peer, e disconnessione</translation>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation>Specifica il tuo indirizzo pubblico</translation>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Soglia di disconnessione dei peer di cattiva qualità (default: 100)</translation>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Numero di secondi di sospensione che i peer di cattiva qualità devono trascorrere prima di riconnettersi (default: 86400)</translation>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>Errore riscontrato durante l'impostazione della porta RPC %u per l'ascolto su IPv4: %s</translation>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on <port> (default: 21055 or testnet: 5745)</source>
<translation>Attendi le connessioni JSON-RPC su <porta> (default: 21055 o testnet: 5745)</translation>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Accetta da linea di comando e da comandi JSON-RPC
</translation>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Esegui in background come demone e accetta i comandi
</translation>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation>Utilizza la rete di prova
</translation>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Accetta connessioni dall'esterno (default: 1 in assenza di -proxy o -connect)</translation>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=bitcoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Bitcoin Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>Errore riscontrato durante l'impostazione della porta RPC %u per l'ascolto su IPv6, tornando su IPv4: %s</translation>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation>Collega all'indirizzo indicato e resta sempre in ascolto su questo. Usa la notazione [host]:porta per l'IPv6</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Bitcoin is probably already running.</source>
<translation>Impossibile ottenere un lock sulla cartella dei dati %s. Probabilmente BIGBullion è già in esecuzione.</translation>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Errore: la transazione è stata rifiutata. Ciò accade se alcuni bigbullion nel portamonete sono stati già spesi, ad esempio se è stata usata una copia del file wallet.dat e i bigbullion sono stati spesi dalla copia ma non segnati come spesi qui.</translation>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation>Errore: questa transazione necessita di una commissione di almeno %s a causa del suo ammontare, della sua complessità, o dell'uso di fondi recentemente ricevuti!</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Esegui comando quando una transazione del portafoglio cambia (%s in cmd è sostituito da TxID)</translation>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation>Imposta la dimensione massima in byte delle transazioni ad alta priorità/bassa commissione (predefinito: 27000)</translation>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation>Questa versione è una compilazione pre-rilascio - usala a tuo rischio - non utilizzarla per la generazione o per applicazioni di commercio</translation>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Attenzione: -paytxfee è molto alta. Questa è la commissione che si paga quando si invia una transazione.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation>Attenzione: le transazioni mostrate potrebbero essere sbagliate! Potresti aver bisogno di aggiornare, o altri nodi ne hanno bisogno.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Bitcoin will not work properly.</source>
<translation>Attenzione: si prega di controllare che la data del computer e l'ora siano corrette. Se il vostro orologio è sbagliato BIGBullion non funziona correttamente.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>Attenzione: errore di lettura di wallet.dat! Tutte le chiave lette correttamente, ma i dati delle transazioni o le voci in rubrica potrebbero mancare o non essere corretti.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>Attenzione: wallet.dat corrotto, dati salvati! Il wallet.dat originale salvato come wallet.{timestamp}.bak in %s; se il tuo bilancio o le transazioni non sono corrette dovresti ripristinare da un backup.</translation>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>Tenta di recuperare le chiavi private da un wallet.dat corrotto</translation>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation>Opzioni creazione blocco:</translation>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation>Connetti solo al nodo specificato</translation>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation>Rilevato database blocchi corrotto</translation>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Scopri proprio indirizzo IP (default: 1 se in ascolto e no -externalip)</translation>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation>Vuoi ricostruire ora il database dei blocchi?</translation>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation>Errore caricamento database blocchi</translation>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation>Errore apertura database blocchi</translation>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation>Errore: lo spazio libero sul disco è poco!</translation>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation>Errore: portafoglio bloccato, impossibile creare la transazione!</translation>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation>Errore: errore di sistema:</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Impossibile mettersi in ascolto su una porta. Usa -listen=0 se vuoi usare questa opzione.</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation>Lettura informazioni blocco fallita</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation>Lettura blocco fallita</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation>Scrittura informazioni blocco fallita</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation>Scrittura blocco fallita</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation>Scrittura informazioni file fallita</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation>Scrittura nel database dei bigbullion fallita</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation>Trova peer utilizzando la ricerca DNS (predefinito: 1 a meno che non si usi -connect)</translation>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation>Quanti blocchi da controllare all'avvio (predefinito: 288, 0 = tutti)</translation>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation>Verifica blocchi...</translation>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation>Verifica portafoglio...</translation>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation>Importa blocchi da un file blk000??.dat esterno</translation>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation>Informazione</translation>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation>Indirizzo -tor non valido: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>Buffer di ricezione massimo per connessione, <n>*1000 byte (default: 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>Buffer di invio massimo per connessione, <n>*1000 byte (default: 1000)</translation>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>Connetti solo a nodi nella rete <net> (IPv4, IPv6 o Tor)</translation>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation>Produci informazioni extra utili al debug. Implica tutte le altre opzioni -debug*</translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation>Genera informazioni extra utili al debug della rete</translation>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation>Anteponi all'output di debug una marca temporale</translation>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation>Opzioni SSL: (vedi il wiki di BIGBullion per le istruzioni di configurazione SSL)</translation>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation>Selezionare la versione del proxy socks da usare (4-5, default: 5)</translation>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Invia le informazioni di trace/debug alla console invece che al file debug.log</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>Invia le informazioni di trace/debug al debugger</translation>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation>Imposta dimensione massima del blocco in bytes (predefinito: 250000)</translation>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>Imposta dimensione minima del blocco in bytes (predefinito: 0)</translation>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Riduci il file debug.log all'avvio del client (predefinito: 1 se non impostato -debug)</translation>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Specifica il timeout di connessione in millisecondi (default: 5000)</translation>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation>Errore di sistema:</translation>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Usa UPnP per mappare la porta di ascolto (default: 0)</translation>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Usa UPnP per mappare la porta di ascolto (default: 1 quando in ascolto)</translation>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation>Usa un proxy per raggiungere servizi nascosti di tor (predefinito: uguale a -proxy)</translation>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation>Nome utente per connessioni JSON-RPC
</translation>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation>Attenzione</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>Attenzione: questa versione è obsoleta, aggiornamento necessario!</translation>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>wallet.dat corrotto, salvataggio fallito</translation>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation>Password per connessioni JSON-RPC
</translation>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>Consenti connessioni JSON-RPC dall'indirizzo IP specificato
</translation>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Inviare comandi al nodo in esecuzione su <ip> (default: 127.0.0.1)
</translation>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Esegui il comando quando il miglior blocco cambia (%s in cmd è sostituito dall'hash del blocco)</translation>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation>Aggiorna il wallet all'ultimo formato</translation>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Impostare la quantità di chiavi di riserva a <n> (default: 100)
</translation>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Ripeti analisi della catena dei blocchi per cercare le transazioni mancanti dal portamonete
</translation>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Utilizzare OpenSSL (https) per le connessioni JSON-RPC
</translation>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation>File certificato del server (default: server.cert)
</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Chiave privata del server (default: server.pem)
</translation>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>Cifrari accettabili (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)
</translation>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation>Questo messaggio di aiuto
</translation>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>Impossibile effettuare il bind su %s su questo computer (il bind ha restituito l'errore %d, %s)</translation>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation>Connessione tramite socks proxy</translation>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Consenti ricerche DNS per -addnode, -seednode e -connect
</translation>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation>Caricamento indirizzi...</translation>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Errore caricamento wallet.dat: Wallet corrotto</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of Bitcoin</source>
<translation>Errore caricamento wallet.dat: il wallet richiede una versione più recente di BIGBullion</translation>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart Bitcoin to complete</source>
<translation>Il portamonete deve essere riscritto: riavviare BIGBullion per completare</translation>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation>Errore caricamento wallet.dat</translation>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation>Indirizzo -proxy non valido: '%s'</translation>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>Rete sconosciuta specificata in -onlynet: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>Versione -socks proxy sconosciuta richiesta: %i</translation>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>Impossibile risolvere -bind address: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>Impossibile risolvere indirizzo -externalip: '%s'</translation>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>Importo non valido per -paytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation>Importo non valido</translation>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation>Fondi insufficienti</translation>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation>Caricamento dell'indice del blocco...</translation>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Aggiungi un nodo a cui connettersi e cerca di mantenere aperta la connessione</translation>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. Bitcoin is probably already running.</source>
<translation>Impossibile effettuare il bind su %s su questo computer. Probabilmente BIGBullion è già in esecuzione.</translation>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation>Commissione per KB da aggiungere alle transazioni in uscita</translation>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation>Caricamento portamonete...</translation>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation>Non è possibile retrocedere il wallet</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation>Non è possibile scrivere l'indirizzo di default</translation>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation>Ripetere la scansione...</translation>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation>Caricamento completato</translation>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation>Per usare la opzione %s</translation>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation>Errore</translation>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>Devi impostare rpcpassword=<password> nel file di configurazione:
%s
Se il file non esiste, crealo con permessi che ne consentano la lettura solo al proprietario.</translation>
</message>
</context>
</TS><|fim▁end|> | </message>
<message>
<location line="+3"/> |
<|file_name|>test_column.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.utils.tests.test_metadata import MetaBaseTest
import operator
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy.tests.helper import assert_follows_unicode_guidelines
from astropy import table
from astropy import time
from astropy import units as u
class TestColumn():
def test_subclass(self, Column):
c = Column(name='a')
assert isinstance(c, np.ndarray)
c2 = c * 2
assert isinstance(c2, Column)
assert isinstance(c2, np.ndarray)
def test_numpy_ops(self, Column):
"""Show that basic numpy operations with Column behave sensibly"""
arr = np.array([1, 2, 3])
c = Column(arr, name='a')
for op, test_equal in ((operator.eq, True),
(operator.ne, False),
(operator.ge, True),
(operator.gt, False),
(operator.le, True),
(operator.lt, False)):
for eq in (op(c, arr), op(arr, c)):
assert np.all(eq) if test_equal else not np.any(eq)
assert len(eq) == 3
if Column is table.Column:
assert type(eq) == np.ndarray
else:
assert type(eq) == np.ma.core.MaskedArray
assert eq.dtype.str == '|b1'
lt = c - 1 < arr
assert np.all(lt)
def test_numpy_boolean_ufuncs(self, Column):
"""Show that basic numpy operations with Column behave sensibly"""
arr = np.array([1, 2, 3])
c = Column(arr, name='a')
for ufunc, test_true in ((np.isfinite, True),
(np.isinf, False),
(np.isnan, False),
(np.sign, True),
(np.signbit, False)):
result = ufunc(c)
assert len(result) == len(c)
assert np.all(result) if test_true else not np.any(result)
if Column is table.Column:
assert type(result) == np.ndarray
else:
assert type(result) == np.ma.core.MaskedArray
if ufunc is not np.sign:
assert result.dtype.str == '|b1'
def test_view(self, Column):
c = np.array([1, 2, 3], dtype=np.int64).view(Column)
assert repr(c) == f"<{Column.__name__} dtype='int64' length=3>\n1\n2\n3"
def test_format(self, Column):
"""Show that the formatted output from str() works"""
from astropy import conf
with conf.set_temp('max_lines', 8):
c1 = Column(np.arange(2000), name='a', dtype=float,
format='%6.2f')
assert str(c1).splitlines() == [' a ',
'-------',
' 0.00',
' 1.00',
' ...',
'1998.00',
'1999.00',
'Length = 2000 rows']
def test_convert_numpy_array(self, Column):
d = Column([1, 2, 3], name='a', dtype='i8')
np_data = np.array(d)
assert np.all(np_data == d)
np_data = np.array(d, copy=False)
assert np.all(np_data == d)
np_data = np.array(d, dtype='i4')
assert np.all(np_data == d)
def test_convert_unit(self, Column):
d = Column([1, 2, 3], name='a', dtype="f8", unit="m")
d.convert_unit_to("km")
assert np.all(d.data == [0.001, 0.002, 0.003])
def test_array_wrap(self):
"""Test that the __array_wrap__ method converts a reduction ufunc
output that has a different shape into an ndarray view. Without this a
method call like c.mean() returns a Column array object with length=1."""
# Mean and sum for a 1-d float column
c = table.Column(name='a', data=[1., 2., 3.])
assert np.allclose(c.mean(), 2.0)
assert isinstance(c.mean(), (np.floating, float))
assert np.allclose(c.sum(), 6.)
assert isinstance(c.sum(), (np.floating, float))
# Non-reduction ufunc preserves Column class
assert isinstance(np.cos(c), table.Column)
# Sum for a 1-d int column
c = table.Column(name='a', data=[1, 2, 3])
assert np.allclose(c.sum(), 6)
assert isinstance(c.sum(), (np.integer, int))
# Sum for a 2-d int column
c = table.Column(name='a', data=[[1, 2, 3],
[4, 5, 6]])
assert c.sum() == 21
assert isinstance(c.sum(), (np.integer, int))
assert np.all(c.sum(axis=0) == [5, 7, 9])
assert c.sum(axis=0).shape == (3,)
assert isinstance(c.sum(axis=0), np.ndarray)
# Sum and mean for a 1-d masked column
c = table.MaskedColumn(name='a', data=[1., 2., 3.], mask=[0, 0, 1])
assert np.allclose(c.mean(), 1.5)
assert isinstance(c.mean(), (np.floating, float))
assert np.allclose(c.sum(), 3.)
assert isinstance(c.sum(), (np.floating, float))
def test_name_none(self, Column):
"""Can create a column without supplying name, which defaults to None"""
c = Column([1, 2])
assert c.name is None
assert np.all(c == np.array([1, 2]))
def test_quantity_init(self, Column):
c = Column(data=np.array([1, 2, 3]) * u.m)
assert np.all(c.data == np.array([1, 2, 3]))
assert np.all(c.unit == u.m)
c = Column(data=np.array([1, 2, 3]) * u.m, unit=u.cm)
assert np.all(c.data == np.array([100, 200, 300]))
assert np.all(c.unit == u.cm)
def test_quantity_comparison(self, Column):
# regression test for gh-6532
c = Column([1, 2100, 3], unit='Hz')
q = 2 * u.kHz
check = c < q
assert np.all(check == [True, False, True])
# This already worked, but just in case.
check = q >= c
assert np.all(check == [True, False, True])
def test_attrs_survive_getitem_after_change(self, Column):
"""
Test for issue #3023: when calling getitem with a MaskedArray subclass
the original object attributes are not copied.
"""
c1 = Column([1, 2, 3], name='a', unit='m', format='%i',
description='aa', meta={'a': 1})
c1.name = 'b'
c1.unit = 'km'
c1.format = '%d'
c1.description = 'bb'
c1.meta = {'bbb': 2}
for item in (slice(None, None), slice(None, 1), np.array([0, 2]),
np.array([False, True, False])):
c2 = c1[item]
assert c2.name == 'b'
assert c2.unit is u.km
assert c2.format == '%d'
assert c2.description == 'bb'
assert c2.meta == {'bbb': 2}
# Make sure that calling getitem resulting in a scalar does
# not copy attributes.
val = c1[1]
for attr in ('name', 'unit', 'format', 'description', 'meta'):
assert not hasattr(val, attr)
def test_to_quantity(self, Column):
d = Column([1, 2, 3], name='a', dtype="f8", unit="m")
assert np.all(d.quantity == ([1, 2, 3.] * u.m))
assert np.all(d.quantity.value == ([1, 2, 3.] * u.m).value)
assert np.all(d.quantity == d.to('m'))
assert np.all(d.quantity.value == d.to('m').value)
np.testing.assert_allclose(d.to(u.km).value, ([.001, .002, .003] * u.km).value)
np.testing.assert_allclose(d.to('km').value, ([.001, .002, .003] * u.km).value)
np.testing.assert_allclose(d.to(u.MHz, u.equivalencies.spectral()).value,
[299.792458, 149.896229, 99.93081933])
d_nounit = Column([1, 2, 3], name='a', dtype="f8", unit=None)
with pytest.raises(u.UnitsError):
d_nounit.to(u.km)
assert np.all(d_nounit.to(u.dimensionless_unscaled) == np.array([1, 2, 3]))
# make sure the correct copy/no copy behavior is happening
q = [1, 3, 5] * u.km
# to should always make a copy
d.to(u.km)[:] = q
np.testing.assert_allclose(d, [1, 2, 3])
# explicit copying of the quantity should not change the column
d.quantity.copy()[:] = q
np.testing.assert_allclose(d, [1, 2, 3])
# but quantity directly is a "view", accessing the underlying column
d.quantity[:] = q
np.testing.assert_allclose(d, [1000, 3000, 5000])
# view should also work for integers
d2 = Column([1, 2, 3], name='a', dtype=int, unit="m")
d2.quantity[:] = q
np.testing.assert_allclose(d2, [1000, 3000, 5000])
# but it should fail for strings or other non-numeric tables
d3 = Column(['arg', 'name', 'stuff'], name='a', unit="m")
with pytest.raises(TypeError):
d3.quantity
def test_to_funcunit_quantity(self, Column):
"""
Tests for #8424, check if function-unit can be retrieved from column.
"""
d = Column([1, 2, 3], name='a', dtype="f8", unit="dex(AA)")
assert np.all(d.quantity == ([1, 2, 3] * u.dex(u.AA)))
assert np.all(d.quantity.value == ([1, 2, 3] * u.dex(u.AA)).value)
assert np.all(d.quantity == d.to("dex(AA)"))
assert np.all(d.quantity.value == d.to("dex(AA)").value)
# make sure, casting to linear unit works
q = [10, 100, 1000] * u.AA
np.testing.assert_allclose(d.to(u.AA), q)
def test_item_access_type(self, Column):
"""
Tests for #3095, which forces integer item access to always return a plain
ndarray or MaskedArray, even in the case of a multi-dim column.
"""
integer_types = (int, np.int_)
for int_type in integer_types:
c = Column([[1, 2], [3, 4]])
i0 = int_type(0)
i1 = int_type(1)
assert np.all(c[i0] == [1, 2])
assert type(c[i0]) == (np.ma.MaskedArray if hasattr(Column, 'mask') else np.ndarray)
assert c[i0].shape == (2,)
c01 = c[i0:i1]
assert np.all(c01 == [[1, 2]])
assert isinstance(c01, Column)
assert c01.shape == (1, 2)
c = Column([1, 2])
assert np.all(c[i0] == 1)
assert isinstance(c[i0], np.integer)
assert c[i0].shape == ()
c01 = c[i0:i1]
assert np.all(c01 == [1])
assert isinstance(c01, Column)
assert c01.shape == (1,)
def test_insert_basic(self, Column):
c = Column([0, 1, 2], name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
# Basic insert
c1 = c.insert(1, 100)
assert np.all(c1 == [0, 100, 1, 2])
assert c1.attrs_equal(c)
assert type(c) is type(c1)
if hasattr(c1, 'mask'):
assert c1.data.shape == c1.mask.shape
c1 = c.insert(-1, 100)
assert np.all(c1 == [0, 1, 100, 2])
c1 = c.insert(3, 100)
assert np.all(c1 == [0, 1, 2, 100])
c1 = c.insert(-3, 100)
assert np.all(c1 == [100, 0, 1, 2])
c1 = c.insert(1, [100, 200, 300])
if hasattr(c1, 'mask'):
assert c1.data.shape == c1.mask.shape
# Out of bounds index
with pytest.raises((ValueError, IndexError)):
c1 = c.insert(-4, 100)
with pytest.raises((ValueError, IndexError)):
c1 = c.insert(4, 100)
def test_insert_axis(self, Column):
"""Insert with non-default axis kwarg"""
c = Column([[1, 2], [3, 4]])
c1 = c.insert(1, [5, 6], axis=None)
assert np.all(c1 == [1, 5, 6, 2, 3, 4])
c1 = c.insert(1, [5, 6], axis=1)
assert np.all(c1 == [[1, 5, 2], [3, 6, 4]])
def test_insert_string_expand(self, Column):
c = Column(['a', 'b'])
c1 = c.insert(0, 'abc')
assert np.all(c1 == ['abc', 'a', 'b'])
c = Column(['a', 'b'])
c1 = c.insert(0, ['c', 'def'])
assert np.all(c1 == ['c', 'def', 'a', 'b'])
def test_insert_string_masked_values(self):
c = table.MaskedColumn(['a', 'b'])
c1 = c.insert(0, np.ma.masked)
assert np.all(c1 == ['', 'a', 'b'])
assert np.all(c1.mask == [True, False, False])
assert c1.dtype == 'U1'
c2 = c.insert(1, np.ma.MaskedArray(['ccc', 'dd'], mask=[True, False]))
assert np.all(c2 == ['a', 'ccc', 'dd', 'b'])
assert np.all(c2.mask == [False, True, False, False])
assert c2.dtype == 'U3'
def test_insert_string_type_error(self, Column):
c = Column([1, 2])
with pytest.raises(ValueError, match='invalid literal for int'):
c.insert(0, 'string')
c = Column(['a', 'b'])
with pytest.raises(TypeError, match='string operation on non-string array'):
c.insert(0, 1)
def test_insert_multidim(self, Column):
c = Column([[1, 2],
[3, 4]], name='a', dtype=int)
# Basic insert
c1 = c.insert(1, [100, 200])
assert np.all(c1 == [[1, 2], [100, 200], [3, 4]])
# Broadcast
c1 = c.insert(1, 100)
assert np.all(c1 == [[1, 2], [100, 100], [3, 4]])
# Wrong shape
with pytest.raises(ValueError):
c1 = c.insert(1, [100, 200, 300])
def test_insert_object(self, Column):
c = Column(['a', 1, None], name='a', dtype=object)
# Basic insert
c1 = c.insert(1, [100, 200])
assert np.all(c1 == np.array(['a', [100, 200], 1, None],
dtype=object))
def test_insert_masked(self):
c = table.MaskedColumn([0, 1, 2], name='a', fill_value=9999,
mask=[False, True, False])
# Basic insert
c1 = c.insert(1, 100)
assert np.all(c1.data.data == [0, 100, 1, 2])
assert c1.fill_value == 9999
assert np.all(c1.data.mask == [False, False, True, False])
assert type(c) is type(c1)
for mask in (False, True):
c1 = c.insert(1, 100, mask=mask)
assert np.all(c1.data.data == [0, 100, 1, 2])
assert np.all(c1.data.mask == [False, mask, True, False])
def test_masked_multidim_as_list(self):
data = np.ma.MaskedArray([1, 2], mask=[True, False])
c = table.MaskedColumn([data])
assert c.shape == (1, 2)
assert np.all(c[0].mask == [True, False])
def test_insert_masked_multidim(self):
c = table.MaskedColumn([[1, 2],
[3, 4]], name='a', dtype=int)
c1 = c.insert(1, [100, 200], mask=True)
assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
assert np.all(c1.data.mask == [[False, False], [True, True], [False, False]])
c1 = c.insert(1, [100, 200], mask=[True, False])
assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
assert np.all(c1.data.mask == [[False, False], [True, False], [False, False]])
with pytest.raises(ValueError):
c1 = c.insert(1, [100, 200], mask=[True, False, True])
def test_mask_on_non_masked_table(self):
"""
When the table is not masked, trying to set a mask on one of its
columns raises an AttributeError.
"""
t = table.Table([[1, 2], [3, 4]], names=('a', 'b'), dtype=('i4', 'f8'))
with pytest.raises(AttributeError):
t['a'].mask = [True, False]
class TestAttrEqual():
"""Bunch of tests originally from ATpy that test the attrs_equal method."""
def test_5(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy')
c2 = Column(name='a', dtype=int, unit='mJy')
assert c1.attrs_equal(c2)
def test_6(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert c1.attrs_equal(c2)
def test_7(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='b', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_8(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=float, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_9(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='erg.cm-2.s-1.Hz-1', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_10(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%g',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_11(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='another test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_12(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'e': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_13(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 9, 'd': 12})
assert not c1.attrs_equal(c2)
def test_col_and_masked_col(self):
c1 = table.Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = table.MaskedColumn(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert c1.attrs_equal(c2)
assert c2.attrs_equal(c1)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
class TestMetaColumn(MetaBaseTest):
test_class = table.Column
args = ()
class TestMetaMaskedColumn(MetaBaseTest):
test_class = table.MaskedColumn
args = ()
def test_getitem_metadata_regression():
"""
Regression test for #1471: MaskedArray does not call __array_finalize__ so
the meta-data was not getting copied over. By overloading _update_from we
are able to work around this bug.
"""
# Make sure that meta-data gets propagated with __getitem__
c = table.Column(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8})
assert c[1:2].name == 'a'
assert c[1:2].description == 'b'
assert c[1:2].unit == 'm'
assert c[1:2].format == '%i'
assert c[1:2].meta['c'] == 8
c = table.MaskedColumn(data=[1, 2], name='a', description='b',
unit='m', format="%i", meta={'c': 8})
assert c[1:2].name == 'a'
assert c[1:2].description == 'b'
assert c[1:2].unit == 'm'
assert c[1:2].format == '%i'
assert c[1:2].meta['c'] == 8
# As above, but with take() - check the method and the function
c = table.Column(data=[1, 2, 3], name='a', description='b',
unit='m', format="%i", meta={'c': 8})
for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
assert subset.name == 'a'
assert subset.description == 'b'
assert subset.unit == 'm'
assert subset.format == '%i'
assert subset.meta['c'] == 8
# Metadata isn't copied for scalar values
for subset in [c.take(0), np.take(c, 0)]:
assert subset == 1
assert subset.shape == ()
assert not isinstance(subset, table.Column)
c = table.MaskedColumn(data=[1, 2, 3], name='a', description='b',
unit='m', format="%i", meta={'c': 8})
for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
assert subset.name == 'a'
assert subset.description == 'b'
assert subset.unit == 'm'
assert subset.format == '%i'
assert subset.meta['c'] == 8
# Metadata isn't copied for scalar values
for subset in [c.take(0), np.take(c, 0)]:
assert subset == 1
assert subset.shape == ()
assert not isinstance(subset, table.MaskedColumn)
def test_unicode_guidelines():
arr = np.array([1, 2, 3])
c = table.Column(arr, name='a')
assert_follows_unicode_guidelines(c)
def test_scalar_column():
"""
Column is not designed to hold scalars, but for numpy 1.6 this can happen:
>> type(np.std(table.Column([1, 2])))
astropy.table.column.Column
"""
c = table.Column(1.5)
assert repr(c) == '1.5'
assert str(c) == '1.5'
def test_qtable_column_conversion():
"""
Ensures that a QTable that gets assigned a unit switches to be Quantity-y
"""
qtab = table.QTable([[1, 2], [3, 4.2]], names=['i', 'f'])
assert isinstance(qtab['i'], table.column.Column)
assert isinstance(qtab['f'], table.column.Column)
qtab['i'].unit = 'km/s'
assert isinstance(qtab['i'], u.Quantity)
assert isinstance(qtab['f'], table.column.Column)
# should follow from the above, but good to make sure as a #4497 regression test
assert isinstance(qtab['i'][0], u.Quantity)
assert isinstance(qtab[0]['i'], u.Quantity)
assert not isinstance(qtab['f'][0], u.Quantity)
assert not isinstance(qtab[0]['f'], u.Quantity)
# Regression test for #5342: if a function unit is assigned, the column
# should become the appropriate FunctionQuantity subclass.
qtab['f'].unit = u.dex(u.cm / u.s**2)
assert isinstance(qtab['f'], u.Dex)
@pytest.mark.parametrize('masked', [True, False])
def test_string_truncation_warning(masked):
"""
Test warnings associated with in-place assignment to a string
column that results in truncation of the right hand side.
"""
from inspect import currentframe, getframeinfo
t = table.Table([['aa', 'bb']], names=['a'], masked=masked)
t['a'][1] = 'cc'
t['a'][:] = 'dd'
with pytest.warns(table.StringTruncateWarning, match=r'truncated right side '
r'string\(s\) longer than 2 character\(s\)') as w:
frameinfo = getframeinfo(currentframe())
t['a'][0] = 'eee' # replace item with string that gets truncated
assert t['a'][0] == 'ee'
assert len(w) == 1
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert 'test_column' in w[0].filename
with pytest.warns(table.StringTruncateWarning, match=r'truncated right side '
r'string\(s\) longer than 2 character\(s\)') as w:
t['a'][:] = ['ff', 'ggg'] # replace item with string that gets truncated
assert np.all(t['a'] == ['ff', 'gg'])
assert len(w) == 1
# Test the obscure case of assigning from an array that was originally
# wider than any of the current elements (i.e. dtype is U4 but actual
# elements are U1 at the time of assignment).
val = np.array(['ffff', 'gggg'])
val[:] = ['f', 'g']
t['a'][:] = val
assert np.all(t['a'] == ['f', 'g'])
def test_string_truncation_warning_masked():
"""
Test warnings associated with in-place assignment of a string
to a masked column, specifically where the right hand side
contains np.ma.masked.
"""
# Test for strings, but also cover assignment of np.ma.masked to
# int and float masked column setting. This was previously only
# covered in an unrelated io.ascii test (test_line_endings) which
# showed an unexpected difference between handling of str and numeric
# masked arrays.
for values in (['a', 'b'], [1, 2], [1.0, 2.0]):
mc = table.MaskedColumn(values)
mc[1] = np.ma.masked
assert np.all(mc.mask == [False, True])
mc[:] = np.ma.masked
assert np.all(mc.mask == [True, True])
mc = table.MaskedColumn(['aa', 'bb'])
with pytest.warns(table.StringTruncateWarning, match=r'truncated right side '
r'string\(s\) longer than 2 character\(s\)') as w:
mc[:] = [np.ma.masked, 'ggg'] # replace item with string that gets truncated
assert mc[1] == 'gg'
assert np.all(mc.mask == [True, False])
assert len(w) == 1
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_create_from_str(Column):
"""
Create a bytestring Column from strings (including unicode) in Py3.
"""
# a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
# Stress the system by injecting non-ASCII characters.
uba = 'bä'
c = Column([uba, 'def'], dtype='S')
assert c.dtype.char == 'S'
assert c[0] == uba
assert isinstance(c[0], str)
assert isinstance(c[:0], table.Column)
assert np.all(c[:2] == np.array([uba, 'def']))
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_bytes_obj(Column):
"""
Create a Column of dtype object with bytestring in it and make sure
it keeps the bytestring and does not convert it to str when accessed.
"""
c = Column([None, b'def'])
assert c.dtype.char == 'O'
assert not c[0]
assert c[1] == b'def'
assert isinstance(c[1], bytes)
assert not isinstance(c[1], str)
assert isinstance(c[:0], table.Column)
assert np.all(c[:2] == np.array([None, b'def']))
assert not np.all(c[:2] == np.array([None, 'def']))
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_bytes(Column):
"""
Create a bytestring Column from bytes and ensure that it works in Python 3 in
a convenient way like in Python 2.
"""
# a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
# Stress the system by injecting non-ASCII characters.
uba = 'bä'
uba8 = uba.encode('utf-8')
c = Column([uba8, b'def'])
assert c.dtype.char == 'S'
assert c[0] == uba
assert isinstance(c[0], str)
assert isinstance(c[:0], table.Column)
assert np.all(c[:2] == np.array([uba, 'def']))
assert isinstance(c[:], table.Column)
assert c[:].dtype.char == 'S'
# Array / list comparisons
assert np.all(c == [uba, 'def'])
ok = c == [uba8, b'def']
assert type(ok) is type(c.data) # noqa
assert ok.dtype.char == '?'
assert np.all(ok)
assert np.all(c == np.array([uba, 'def']))
assert np.all(c == np.array([uba8, b'def']))
# Scalar compare
cmps = (uba, uba8)
for cmp in cmps:
ok = c == cmp
assert type(ok) is type(c.data) # noqa
assert np.all(ok == [True, False])
def test_col_unicode_sandwich_unicode():
"""
Sanity check that Unicode Column behaves normally.
"""
uba = 'bä'
uba8 = uba.encode('utf-8')
c = table.Column([uba, 'def'], dtype='U')
assert c[0] == uba
assert isinstance(c[:0], table.Column)
assert isinstance(c[0], str)
assert np.all(c[:2] == np.array([uba, 'def']))
assert isinstance(c[:], table.Column)
assert c[:].dtype.char == 'U'
ok = c == [uba, 'def']
assert type(ok) == np.ndarray
assert ok.dtype.char == '?'
assert np.all(ok)
assert np.all(c != [uba8, b'def'])
def test_masked_col_unicode_sandwich():
"""
Create a bytestring MaskedColumn and ensure that it works in Python 3 in
a convenient way like in Python 2.
"""
c = table.MaskedColumn([b'abc', b'def'])
c[1] = np.ma.masked
assert isinstance(c[:0], table.MaskedColumn)
assert isinstance(c[0], str)
assert c[0] == 'abc'
assert c[1] is np.ma.masked
assert isinstance(c[:], table.MaskedColumn)
assert c[:].dtype.char == 'S'
ok = c == ['abc', 'def']
assert ok[0] == True # noqa
assert ok[1] is np.ma.masked
assert np.all(c == [b'abc', b'def'])
assert np.all(c == np.array(['abc', 'def']))
assert np.all(c == np.array([b'abc', b'def']))
for cmp in ('abc', b'abc'):
ok = c == cmp
assert type(ok) is np.ma.MaskedArray
assert ok[0] == True # noqa
assert ok[1] is np.ma.masked
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_unicode_sandwich_set(Column):
"""
Test setting
"""
uba = 'bä'
c = Column([b'abc', b'def'])
c[0] = b'aa'
assert np.all(c == ['aa', 'def'])
c[0] = uba # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding
assert np.all(c == [uba, 'def'])
assert c.pformat() == ['None', '----', ' ' + uba, ' def']
c[:] = b'cc'
assert np.all(c == ['cc', 'cc'])
c[:] = uba
assert np.all(c == [uba, uba])
c[:] = ''
c[:] = [uba, b'def']
assert np.all(c == [uba, b'def'])
@pytest.mark.parametrize('class1', [table.MaskedColumn, table.Column])
@pytest.mark.parametrize('class2', [table.MaskedColumn, table.Column, str, list])
def test_unicode_sandwich_compare(class1, class2):
"""Test that comparing a bytestring Column/MaskedColumn with various
str (unicode) object types gives the expected result. Tests #6838.
"""
obj1 = class1([b'a', b'c'])
if class2 is str:
obj2 = 'a'
elif class2 is list:
obj2 = ['a', 'b']
else:
obj2 = class2(['a', 'b'])
assert np.all((obj1 == obj2) == [True, False])
assert np.all((obj2 == obj1) == [True, False])
assert np.all((obj1 != obj2) == [False, True])
assert np.all((obj2 != obj1) == [False, True])
assert np.all((obj1 > obj2) == [False, True])
assert np.all((obj2 > obj1) == [False, False])
assert np.all((obj1 <= obj2) == [True, False])
assert np.all((obj2 <= obj1) == [True, True])
assert np.all((obj1 < obj2) == [False, False])
assert np.all((obj2 < obj1) == [False, True])
assert np.all((obj1 >= obj2) == [True, True])
assert np.all((obj2 >= obj1) == [True, False])
def test_unicode_sandwich_masked_compare():
"""Test the fix for #6839 from #6899."""
c1 = table.MaskedColumn(['a', 'b', 'c', 'd'],
mask=[True, False, True, False])
c2 = table.MaskedColumn([b'a', b'b', b'c', b'd'],
mask=[True, True, False, False])
for cmp in ((c1 == c2), (c2 == c1)):
assert cmp[0] is np.ma.masked
assert cmp[1] is np.ma.masked
assert cmp[2] is np.ma.masked
assert cmp[3]
for cmp in ((c1 != c2), (c2 != c1)):
assert cmp[0] is np.ma.masked
assert cmp[1] is np.ma.masked
assert cmp[2] is np.ma.masked
assert not cmp[3]
# Note: comparisons <, >, >=, <= fail to return a masked array entirely,
# see https://github.com/numpy/numpy/issues/10092.
def test_structured_masked_column_roundtrip():
mc = table.MaskedColumn([(1., 2.), (3., 4.)],
mask=[(False, False), (False, False)], dtype='f8,f8')
assert len(mc.dtype.fields) == 2
mc2 = table.MaskedColumn(mc)
assert_array_equal(mc2, mc)
<|fim▁hole|>@pytest.mark.parametrize('dtype', ['i4,f4', 'f4,(2,)f8'])
def test_structured_empty_column_init(dtype):
dtype = np.dtype(dtype)
c = table.Column(length=5, shape=(2,), dtype=dtype)
assert c.shape == (5, 2)
assert c.dtype == dtype
def test_column_value_access():
"""Can a column's underlying data consistently be accessed via `.value`,
whether it is a `Column`, `MaskedColumn`, `Quantity`, or `Time`?"""
data = np.array([1, 2, 3])
tbl = table.QTable({'a': table.Column(data),
'b': table.MaskedColumn(data),
'c': u.Quantity(data),
'd': time.Time(data, format='mjd')})
assert type(tbl['a'].value) == np.ndarray
assert type(tbl['b'].value) == np.ma.MaskedArray
assert type(tbl['c'].value) == np.ndarray
assert type(tbl['d'].value) == np.ndarray
def test_masked_column_serialize_method_propagation():
mc = table.MaskedColumn([1., 2., 3.], mask=[True, False, True])
assert mc.info.serialize_method['ecsv'] == 'null_value'
mc.info.serialize_method['ecsv'] = 'data_mask'
assert mc.info.serialize_method['ecsv'] == 'data_mask'
mc2 = mc.copy()
assert mc2.info.serialize_method['ecsv'] == 'data_mask'
mc3 = table.MaskedColumn(mc)
assert mc3.info.serialize_method['ecsv'] == 'data_mask'
mc4 = mc.view(table.MaskedColumn)
assert mc4.info.serialize_method['ecsv'] == 'data_mask'
mc5 = mc[1:]
assert mc5.info.serialize_method['ecsv'] == 'data_mask'
@pytest.mark.parametrize('dtype', ['S', 'U', 'i'])
def test_searchsorted(Column, dtype):
c = Column([1, 2, 2, 3], dtype=dtype)
if Column is table.MaskedColumn:
# Searchsorted seems to ignore the mask
c[2] = np.ma.masked
if dtype == 'i':
vs = (2, [2, 1])
else:
vs = ('2', ['2', '1'], b'2', [b'2', b'1'])
for v in vs:
v = np.array(v, dtype=dtype)
exp = np.searchsorted(c.data, v, side='right')
res = c.searchsorted(v, side='right')
assert np.all(res == exp)
res = np.searchsorted(c, v, side='right')
assert np.all(res == exp)<|fim▁end|> | |
<|file_name|>script.js<|end_file_name|><|fim▁begin|>$(function () {<|fim▁hole|>})
$(function () {
$('[data-toggle="popover"]').popover()
})<|fim▁end|> | $('[data-toggle="tooltip"]').tooltip() |
<|file_name|>davis_log.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# log lines from a serial device to a file, echoing them to stdout as it goes
import serial, sys, optparse, time
parser = optparse.OptionParser("davis_log")
parser.add_option("--baudrate", type='int', default=57600, help='baud rate')
opts, args = parser.parse_args()
if len(args) != 2:
print("usage: reflector.py <DEVICE> <logfile>")
sys.exit(1)
device = args[0]
logfile = args[1]
port = serial.Serial(device, opts.baudrate, timeout=5, dsrdtr=False, rtscts=False, xonxoff=False)
log = open(logfile, mode="a")<|fim▁hole|> line = line.rstrip()
out = "%s %.2f\n" % (line, time.time())
log.write(out);
log.flush()
sys.stdout.write(out)
sys.stdout.flush()<|fim▁end|> |
while True:
line = port.readline() |
<|file_name|>WorldMapArea.java<|end_file_name|><|fim▁begin|>package com.gromstudio.treckar.model;
import java.net.URL;
public class WorldMapArea {
URL mBitmapUrl;<|fim▁hole|>
}<|fim▁end|> | |
<|file_name|>var-captured-in-nested-closure.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license<|fim▁hole|>// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#10381)
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print variable
// gdb-check:$1 = 1
// gdb-command:print constant
// gdb-check:$2 = 2
// gdb-command:print a_struct
// gdb-check:$3 = {a = -3, b = 4.5, c = 5}
// gdb-command:print *struct_ref
// gdb-check:$4 = {a = -3, b = 4.5, c = 5}
// gdb-command:print *owned
// gdb-check:$5 = 6
// gdb-command:print closure_local
// gdb-check:$6 = 8
// gdb-command:continue
// gdb-command:print variable
// gdb-check:$7 = 1
// gdb-command:print constant
// gdb-check:$8 = 2
// gdb-command:print a_struct
// gdb-check:$9 = {a = -3, b = 4.5, c = 5}
// gdb-command:print *struct_ref
// gdb-check:$10 = {a = -3, b = 4.5, c = 5}
// gdb-command:print *owned
// gdb-check:$11 = 6
// gdb-command:print closure_local
// gdb-check:$12 = 8
// gdb-command:continue
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print variable
// lldb-check:[...]$0 = 1
// lldb-command:print constant
// lldb-check:[...]$1 = 2
// lldb-command:print a_struct
// lldb-check:[...]$2 = Struct { a: -3, b: 4.5, c: 5 }
// lldb-command:print *struct_ref
// lldb-check:[...]$3 = Struct { a: -3, b: 4.5, c: 5 }
// lldb-command:print *owned
// lldb-check:[...]$4 = 6
// lldb-command:print closure_local
// lldb-check:[...]$5 = 8
// lldb-command:continue
// lldb-command:print variable
// lldb-check:[...]$6 = 1
// lldb-command:print constant
// lldb-check:[...]$7 = 2
// lldb-command:print a_struct
// lldb-check:[...]$8 = Struct { a: -3, b: 4.5, c: 5 }
// lldb-command:print *struct_ref
// lldb-check:[...]$9 = Struct { a: -3, b: 4.5, c: 5 }
// lldb-command:print *owned
// lldb-check:[...]$10 = 6
// lldb-command:print closure_local
// lldb-check:[...]$11 = 8
// lldb-command:continue
#![allow(unused_variables)]
struct Struct {
a: int,
b: f64,
c: uint
}
fn main() {
let mut variable = 1;
let constant = 2;
let a_struct = Struct {
a: -3,
b: 4.5,
c: 5
};
let struct_ref = &a_struct;
let owned = box 6;
let closure = || {
let closure_local = 8;
let nested_closure = || {
zzz(); // #break
variable = constant + a_struct.a + struct_ref.a + *owned + closure_local;
};
zzz(); // #break
nested_closure();
};
closure();
}
fn zzz() {()}<|fim▁end|> | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your |
<|file_name|>ex-walking-01.js<|end_file_name|><|fim▁begin|>// Copyright © 2014 John Watson
// Licensed under the terms of the MIT License
var GameState = function(game) {
};
// Load images and sounds
GameState.prototype.preload = function() {
this.game.load.image('ground', 'assets/gfx/ground.png');
this.game.load.image('player', 'assets/gfx/player.png');
};
// Setup the example
GameState.prototype.create = function() {
// Set stage background to something sky colored
this.game.stage.backgroundColor = 0x4488cc;
// Define movement constants
this.MAX_SPEED = 500; // pixels/second
// Create a player sprite
this.player = this.game.add.sprite(this.game.width/2, this.game.height - 64, 'player');
// Enable physics on the player
this.game.physics.enable(this.player, Phaser.Physics.ARCADE);
// Make player collide with world boundaries so he doesn't leave the stage
this.player.body.collideWorldBounds = true;
<|fim▁hole|> // platforms may not need code like this.
this.game.input.keyboard.addKeyCapture([
Phaser.Keyboard.LEFT,
Phaser.Keyboard.RIGHT,
Phaser.Keyboard.UP,
Phaser.Keyboard.DOWN
]);
// Create some ground for the player to walk on
this.ground = this.game.add.group();
for(var x = 0; x < this.game.width; x += 32) {
// Add the ground blocks, enable physics on each, make them immovable
var groundBlock = this.game.add.sprite(x, this.game.height - 32, 'ground');
this.game.physics.enable(groundBlock, Phaser.Physics.ARCADE);
groundBlock.body.immovable = true;
groundBlock.body.allowGravity = false;
this.ground.add(groundBlock);
}
};
// The update() method is called every frame
GameState.prototype.update = function() {
// Collide the player with the ground
this.game.physics.arcade.collide(this.player, this.ground);
if (this.leftInputIsActive()) {
// If the LEFT key is down, set the player velocity to move left
this.player.body.velocity.x = -this.MAX_SPEED;
} else if (this.rightInputIsActive()) {
// If the RIGHT key is down, set the player velocity to move right
this.player.body.velocity.x = this.MAX_SPEED;
} else {
// Stop the player from moving horizontally
this.player.body.velocity.x = 0;
}
};
// This function should return true when the player activates the "go left" control
// In this case, either holding the left arrow or tapping or clicking on the left
// side of the screen.
GameState.prototype.leftInputIsActive = function() {
var isActive = false;
isActive = this.input.keyboard.isDown(Phaser.Keyboard.LEFT);
isActive |= (this.game.input.activePointer.isDown &&
this.game.input.activePointer.x < this.game.width/4);
return isActive;
};
// This function should return true when the player activates the "go right" control
// In this case, either holding the right arrow or tapping or clicking on the right
// side of the screen.
GameState.prototype.rightInputIsActive = function() {
var isActive = false;
isActive = this.input.keyboard.isDown(Phaser.Keyboard.RIGHT);
isActive |= (this.game.input.activePointer.isDown &&
this.game.input.activePointer.x > this.game.width/2 + this.game.width/4);
return isActive;
};
var game = new Phaser.Game(640, 320, Phaser.AUTO, 'game');
game.state.add('game', GameState, true);<|fim▁end|> | // Capture certain keys to prevent their default actions in the browser.
// This is only necessary because this is an HTML5 game. Games on other |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>use std::io;
fn stdinln_i32() -> i32 {
// This function reads a single line from stdin and returns a single integer.
let mut buffer = String::new();
std::io::stdin().read_line(&mut buffer).expect("Failed to read stdin.");
buffer.trim().parse::<i32>().unwrap()
}
fn stdinln_vec_i32() -> Vec<i32> {
// This function reads a single line from stdin and returns multiple integers as a vector.
let mut buffer = String::new();
std::io::stdin().read_line(&mut buffer).expect("Failed to read line");
let ret: Vec<i32> = buffer.split(" ")
.map(|x| x.trim().parse().expect("Unexpected Integer Pattern"))
.collect();
ret
}
struct TelBox {
data: Vec<i32>,
nr_tel: usize,
cntbox: [u32; 10000], // array whose size is fixed at compile time; faster than a Vec
}
impl TelBox {
fn new_from_vec_i32(inputs: Vec<i32>, len: usize) -> TelBox {
TelBox{data:inputs, nr_tel:len, cntbox:[0; 10000]}
}
fn get_most(&mut self) -> u32 {
// Please complete TelBox::get_most()!
for i in 0..self.nr_tel {
self.cntbox[self.data[i] as usize] += 1;
}
let mut biggest = 0;
for i in 1..10000 {
if self.cntbox[i] > self.cntbox[biggest] {
biggest = i;
}else if self.cntbox[i] == self.cntbox[biggest] {
if i < biggest {
biggest = i;
}
}
}
biggest as u32
// End of TelBox::get_most()
}
}
// In Rust, an array's length must be known at compile time.
// For this problem, the 0..10000 bound is fixed before compile time.
// Therefore this problem uses an array rather than a Vec.
// Hint: https://doc.rust-lang.org/std/primitive.array.html
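// Illustrative comparison (not part of the original solution):
//     let on_stack: [u32; 10000] = [0; 10000]; // length fixed at compile time
//     let on_heap: Vec<u32> = vec![0; 10000];  // length may be chosen at run time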
fn main() {
let nr_tel = stdinln_i32() as usize;
let mut tels: Vec<i32> = Vec::new();
for i in 0..nr_tel {
tels.push(stdinln_i32());<|fim▁hole|> // Please complete TelBox::get_most()!
println!("{:04}", problem.get_most());
}<|fim▁end|> | }
let mut problem = TelBox::new_from_vec_i32(tels, nr_tel); |
<|file_name|>type-alias-impl-trait-fn-type.rs<|end_file_name|><|fim▁begin|>#![feature(type_alias_impl_trait)]
#![allow(dead_code)]
// FIXME: this is ruled out for now but should work
type Foo = fn() -> impl Send;
//~^ ERROR: `impl Trait` not allowed outside of function and method return types<|fim▁hole|>
fn main() {}<|fim▁end|> |
fn make_foo() -> Foo {
|| 15
} |
<|file_name|>designer-controller.js<|end_file_name|><|fim▁begin|>/*
* Electronic Logistics Management Information System (eLMIS) is a supply chain management system for health commodities in a developing country setting.
*
* Copyright (C) 2015 John Snow, Inc (JSI). This program was produced for the U.S. Agency for International Development (USAID). It was prepared under the USAID | DELIVER PROJECT, Task Order 4.
*<|fim▁hole|> * You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
function CustomReportDesignerController($scope, reports, SaveCustomReport, CustomReportFullList){
$scope.r = reports;
$scope.reports = _.groupBy( $scope.r, 'category');
$scope.init = function(){
if($scope.sqleditor === undefined){
$scope.sqleditor = ace.edit("sqleditor");
$scope.sqleditor.setTheme("ace/theme/chrome");
$scope.sqleditor.getSession().setMode("ace/mode/pgsql");
$scope.filter = ace.edit("filtereditor");
$scope.filter.setTheme("ace/theme/chrome");
$scope.filter.getSession().setMode("ace/mode/json");
$scope.column = ace.edit("columneditor");
$scope.column.setTheme("ace/theme/chrome");
$scope.column.getSession().setMode("ace/mode/json");
$scope.meta = ace.edit("metaeditor");
$scope.meta.setTheme("ace/theme/chrome");
$scope.meta.getSession().setMode("ace/mode/html");
}
$scope.sqleditor.setValue($scope.current.query);
$scope.filter.setValue($scope.current.filters);
$scope.column.setValue($scope.current.columnoptions);
$scope.meta.setValue($scope.current.meta);
};
$scope.select = function(report){
// clear previous values and message on screen
$scope.columns = $scope.data = [];
$scope.message = undefined;
$scope.current = report;
$scope.init();
};
$scope.New = function(){
$scope.current = {query:'', filters:'[]', columnoptions:'[]'};
$scope.init();
};
$scope.Save = function(){
$scope.current.query = $scope.sqleditor.getValue();
$scope.current.filters = $scope.filter.getValue();
$scope.current.columnoptions = $scope.column.getValue();
$scope.current.meta = $scope.meta.getValue();
var save = SaveCustomReport.save($scope.current);
save.$promise.then(function(){
$scope.message = $scope.current.name + ' saved successfully!';
$scope.current = undefined;
$scope.r = CustomReportFullList.get();
$scope.r.$promise.then(function(){
$scope.reports = _.groupBy( $scope.r.reports, 'category');
});
});
};
}
CustomReportDesignerController.resolve = {
reports: function ($q, $timeout, CustomReportFullList) {
var deferred = $q.defer();
$timeout(function () {
CustomReportFullList.get(function (data) {
deferred.resolve(data.reports);
});
}, 100);
return deferred.promise;
}
};<|fim▁end|> | * This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.
* |
<|file_name|>sti.go<|end_file_name|><|fim▁begin|>package builder
import (
"github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
stiapi "github.com/openshift/source-to-image/pkg/api"
sti "github.com/openshift/source-to-image/pkg/build/strategies"
"github.com/openshift/origin/pkg/build/api"
)
// STIBuilder performs an STI build given the build object
type STIBuilder struct {
dockerClient DockerClient
dockerSocket string
authPresent bool
auth docker.AuthConfiguration
build *api.Build
}
// NewSTIBuilder creates a new STIBuilder instance
func NewSTIBuilder(client DockerClient, dockerSocket string, authCfg docker.AuthConfiguration, authPresent bool, build *api.Build) *STIBuilder {
return &STIBuilder{
dockerClient: client,
dockerSocket: dockerSocket,
authPresent: authPresent,
auth: authCfg,<|fim▁hole|> }
}
// Build executes the STI build
func (s *STIBuilder) Build() error {
tag := s.build.Parameters.Output.DockerImageReference
request := &stiapi.Request{
BaseImage: s.build.Parameters.Strategy.STIStrategy.Image,
DockerSocket: s.dockerSocket,
Source: s.build.Parameters.Source.Git.URI,
ContextDir: s.build.Parameters.Source.ContextDir,
Tag: tag,
ScriptsURL: s.build.Parameters.Strategy.STIStrategy.Scripts,
Environment: getBuildEnvVars(s.build),
Incremental: s.build.Parameters.Strategy.STIStrategy.Incremental,
}
if s.build.Parameters.Revision != nil && s.build.Parameters.Revision.Git != nil &&
s.build.Parameters.Revision.Git.Commit != "" {
request.Ref = s.build.Parameters.Revision.Git.Commit
} else if s.build.Parameters.Source.Git.Ref != "" {
request.Ref = s.build.Parameters.Source.Git.Ref
}
glog.V(2).Infof("Creating a new STI builder with build request: %#v\n", request)
builder, err := sti.GetStrategy(request)
if err != nil {
return err
}
defer removeImage(s.dockerClient, tag)
if _, err = builder.Build(request); err != nil {
return err
}
if len(s.build.Parameters.Output.DockerImageReference) != 0 {
return pushImage(s.dockerClient, tag, s.auth)
}
return nil
}<|fim▁end|> | build: build, |
<|file_name|>FacetParams.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.common.params;
import java.util.EnumSet;
import java.util.Locale;
import org.apache.solr.common.SolrException;
/** Facet parameters */
public interface FacetParams {
/** Should facet counts be calculated? */
public static final String FACET = "facet";
/**
* Numeric option indicating the maximum number of threads to be used in counting facet field
* values
*/
public static final String FACET_THREADS = FACET + ".threads";
/** What method should be used to do the faceting */
public static final String FACET_METHOD = FACET + ".method";
/**
* Value for FACET_METHOD param to indicate that Solr should enumerate over terms in a field to
* calculate the facet counts.
*/
public static final String FACET_METHOD_enum = "enum";
/**
* Value for FACET_METHOD param to indicate that Solr should enumerate over documents and count up
* terms by consulting an uninverted representation of the field values (such as the FieldCache
* used for sorting).
*/
public static final String FACET_METHOD_fc = "fc";
/** Value for FACET_METHOD param, like FACET_METHOD_fc but counts per-segment. */
public static final String FACET_METHOD_fcs = "fcs";
/** Value for FACET_METHOD param to indicate that Solr should use an UnInvertedField */
public static final String FACET_METHOD_uif = "uif";
/**
* Any Lucene-formatted queries the user would like to use for Facet Constraint Counts
* (multi-value)
*/
public static final String FACET_QUERY = FACET + ".query";
/**
* Any field whose terms the user wants to enumerate over for Facet Constraint Counts
* (multi-value)
*/
public static final String FACET_FIELD = FACET + ".field";
/** The offset into the list of facets. Can be overridden on a per field basis. */
public static final String FACET_OFFSET = FACET + ".offset";
/**
* Numeric option indicating the maximum number of facet field counts be included in the response
* for each field - in descending order of count. Can be overridden on a per field basis.
*/
public static final String FACET_LIMIT = FACET + ".limit";
/**
* Numeric option indicating the minimum number of hits before a facet should be included in the
* response. Can be overridden on a per field basis.
*/
public static final String FACET_MINCOUNT = FACET + ".mincount";
/**
* Boolean option indicating whether facet field counts of "0" should be included in the response.
* Can be overridden on a per field basis.
*/
public static final String FACET_ZEROS = FACET + ".zeros";
/**
* Boolean option indicating whether the response should include a facet field count for all
* records which have no value for the facet field. Can be overridden on a per field basis.
*/
public static final String FACET_MISSING = FACET + ".missing";
static final String FACET_OVERREQUEST = FACET + ".overrequest";
/**
* The percentage to over-request by when performing initial distributed requests.
*
* <p>default value is 1.5
*/
public static final String FACET_OVERREQUEST_RATIO = FACET_OVERREQUEST + ".ratio";
/**
* An additional amount to over-request by when performing initial distributed requests. This
* value will be added after accounting for the over-request ratio.
*
* <p>default value is 10
*/
public static final String FACET_OVERREQUEST_COUNT = FACET_OVERREQUEST + ".count";
/**
* Comma separated list of fields to pivot
*
* <p>example: author,type (for types by author / types within author)
*/
public static final String FACET_PIVOT = FACET + ".pivot";
/**
* Minimum number of docs that need to match to be included in the sublist
*
* <p>default value is 1
*/
public static final String FACET_PIVOT_MINCOUNT = FACET_PIVOT + ".mincount";
/**
* String option: "count" causes facets to be sorted by the count, "index" results in index order.
*/
public static final String FACET_SORT = FACET + ".sort";
public static final String FACET_SORT_COUNT = "count";
public static final String FACET_SORT_COUNT_LEGACY = "true";
public static final String FACET_SORT_INDEX = "index";
public static final String FACET_SORT_INDEX_LEGACY = "false";
/** Only return constraints of a facet field with the given prefix. */
public static final String FACET_PREFIX = FACET + ".prefix";
/** Only return constraints of a facet field containing the given string. */
public static final String FACET_CONTAINS = FACET + ".contains";
/** Only return constraints of a facet field containing the given string. */
public static final String FACET_MATCHES = FACET + ".matches";
/** If using facet contains, ignore case when comparing values. */
public static final String FACET_CONTAINS_IGNORE_CASE = FACET_CONTAINS + ".ignoreCase";
/** Only return constraints of a facet field excluding the given string. */
public static final String FACET_EXCLUDETERMS = FACET + ".excludeTerms";
/**
* When faceting by enumerating the terms in a field, only use the filterCache for terms with a df
* >= to this parameter.
*/
public static final String FACET_ENUM_CACHE_MINDF = FACET + ".enum.cache.minDf";
/**
* A boolean parameter that caps the facet counts at 1. With this set, a returned count will only
* be 0 or 1. For apps that don't need the count, this should be an optimization
*/
public static final String FACET_EXISTS = FACET + ".exists";
/**
* Any field whose terms the user wants to enumerate over for Facet Constraint Counts (multi-value)
*/
public static final String FACET_DATE = FACET + ".date";
/**
* Date string indicating the starting point for a date facet range. Can be overridden on a per
* field basis.
*/
public static final String FACET_DATE_START = FACET_DATE + ".start";
/**
* Date string indicating the ending point for a date facet range. Can be overridden on a per
* field basis.
*/
public static final String FACET_DATE_END = FACET_DATE + ".end";
/**
* Date Math string indicating the interval of sub-ranges for a date facet range. Can be
* overridden on a per field basis.
*/
public static final String FACET_DATE_GAP = FACET_DATE + ".gap";
/**
* Boolean indicating how counts should be computed if the range between 'start' and 'end' is not
* evenly divisible by 'gap'. If this value is true, then all counts of ranges involving the 'end'
* point will use the exact endpoint specified -- this includes the 'between' and 'after' counts
* as well as the last range computed using the 'gap'. If the value is false, then 'gap' is used
* to compute the effective endpoint closest to the 'end' param which results in the range between
* 'start' and 'end' being evenly divisible by 'gap'.
*
* <p>The default is false.
*
* <p>Can be overridden on a per field basis.
*/
public static final String FACET_DATE_HARD_END = FACET_DATE + ".hardend";
/**
* String indicating what "other" ranges should be computed for a date facet range (multi-value).
*
* <p>Can be overridden on a per field basis.
*
* @see FacetRangeOther
*/
public static final String FACET_DATE_OTHER = FACET_DATE + ".other";
/**
* Multivalued string indicating what rules should be applied to determine when the ranges
* generated for date faceting should be inclusive or exclusive of their end points.
*
* <p>The default value if none are specified is: [lower,upper,edge] <i>(NOTE: This is different
* then FACET_RANGE_INCLUDE)</i>
*
* <p>Can be overridden on a per field basis.
*
* @see FacetRangeInclude
* @see #FACET_RANGE_INCLUDE
*/
public static final String FACET_DATE_INCLUDE = FACET_DATE + ".include";
/**
* Any numerical field whose terms the user wants to enumerate over for Facet Constraint Counts
* on selected ranges.
*/
public static final String FACET_RANGE = FACET + ".range";
/**
* Number indicating the starting point for a numerical range facet. Can be overridden on a per
* field basis.
*/
public static final String FACET_RANGE_START = FACET_RANGE + ".start";
/**
* Number indicating the ending point for a numerical range facet. Can be overridden on a per
* field basis.
*/
public static final String FACET_RANGE_END = FACET_RANGE + ".end";
/**
* Number indicating the interval of sub-ranges for a numerical facet range. Can be overridden on
* a per field basis.
*/
public static final String FACET_RANGE_GAP = FACET_RANGE + ".gap";
/**
* Boolean indicating how counts should be computed if the range between 'start' and 'end' is not
* evenly divisible by 'gap'. If this value is true, then all counts of ranges involving the 'end'
* point will use the exact endpoint specified -- this includes the 'between' and 'after' counts
* as well as the last range computed using the 'gap'. If the value is false, then 'gap' is used
* to compute the effective endpoint closest to the 'end' param which results in the range between
* 'start' and 'end' being evenly divisible by 'gap'.
*
* <p>The default is false.
*
* <p>Can be overridden on a per field basis.
*/
public static final String FACET_RANGE_HARD_END = FACET_RANGE + ".hardend";
/**
* String indicating what "other" ranges should be computed for a numerical range facet
* (multi-value). Can be overridden on a per field basis.
*/
public static final String FACET_RANGE_OTHER = FACET_RANGE + ".other";
/**
* Multivalued string indicating what rules should be applied to determine when the ranges
* generated for numeric faceting should be inclusive or exclusive of their end points.
*
* <p>The default value if none are specified is: lower
*
* <p>Can be overridden on a per field basis.
*
* @see FacetRangeInclude
*/
public static final String FACET_RANGE_INCLUDE = FACET_RANGE + ".include";
/**
* String indicating the method to use to resolve range facets.
*
* <p>Can be overridden on a per field basis.
*
* @see FacetRangeMethod
*/
public static final String FACET_RANGE_METHOD = FACET_RANGE + ".method";
/** Any field whose values the user wants to enumerate as explicit intervals of terms. */
public static final String FACET_INTERVAL = FACET + ".interval";
/** Set of terms for a single interval to facet on. */
public static final String FACET_INTERVAL_SET = FACET_INTERVAL + ".set";
/**
* A spatial RPT field to generate a 2D "heatmap" (grid of facet counts) on. Just like the other
* faceting types, this may include a 'key' or local-params to facet multiple times. All
* parameters with this suffix can be overridden on a per-field basis.
*/
public static final String FACET_HEATMAP = "facet.heatmap";
/** The format of the heatmap: either png or ints2D (default). */
public static final String FACET_HEATMAP_FORMAT = FACET_HEATMAP + ".format";
/**
* The region the heatmap should minimally enclose. It defaults to the world if not set. The
* format can either be a minimum to maximum point range format:
*
* <pre>["-150 10" TO "-100 30"]</pre>
*
* (the first is the bottom-left corner and the second is the top-right corner, both parsed the
* same way points are parsed). OR, any WKT can be provided and its bounding box will be taken.
*/
public static final String FACET_HEATMAP_GEOM = FACET_HEATMAP + ".geom";
/**
* Specify the heatmap grid level explicitly, instead of deriving it via distErr or distErrPct.
*/
public static final String FACET_HEATMAP_LEVEL = FACET_HEATMAP + ".gridLevel";
/**
* Used to determine the heatmap grid level to compute, defaulting to 0.15. It has the same
* interpretation of distErrPct when searching on RPT, but relative to the shape in 'bbox'. It's a
* fraction (not a %) of the radius of the shape that grid squares must fit into without
* exceeding. > 0 and <= 0.5. Mutually exclusive with distErr & gridLevel.
*/
public static final String FACET_HEATMAP_DIST_ERR_PCT = FACET_HEATMAP + ".distErrPct";
/**
* Used to determine the heatmap grid level to compute (optional). It has the same interpretation
* of maxDistErr or distErr with RPT. It's an absolute distance (in units of what's specified on
* the field type) that a grid square must maximally fit into (width & height). It can be used
* to more explicitly specify the maximum grid square size without knowledge of what particular
* grid levels translate to. This can in turn be used with knowledge of the size of 'bbox' to get
* a target minimum number of grid cells. Mutually exclusive with distErrPct & gridLevel.
*/
public static final String FACET_HEATMAP_DIST_ERR = FACET_HEATMAP + ".distErr";
/**
* The maximum number of cells (grid squares) the client is willing to handle. If this limit would
* be exceeded, we throw an error instead. Defaults to 100k.
*/
public static final String FACET_HEATMAP_MAX_CELLS = FACET_HEATMAP + ".maxCells";
/**
* An enumeration of the legal values for {@link #FACET_RANGE_OTHER} and {@link #FACET_DATE_OTHER}
* ...
*<|fim▁hole|> * <ul>
* <li>before = the count of matches before the start
* <li>after = the count of matches after the end
* <li>between = the count of all matches between start and end
* <li>all = all of the above (default value)
* <li>none = no additional info requested
* </ul>
*
* @see #FACET_RANGE_OTHER
* @see #FACET_DATE_OTHER
*/
public enum FacetRangeOther {
BEFORE,
AFTER,
BETWEEN,
ALL,
NONE;
@Override
public String toString() {
return super.toString().toLowerCase(Locale.ROOT);
}
public static FacetRangeOther get(String label) {
try {
return valueOf(label.toUpperCase(Locale.ROOT));
} catch (IllegalArgumentException e) {
throw new SolrException(
SolrException.ErrorCode.BAD_REQUEST,
label + " is not a valid type of 'other' range facet information",
e);
}
}
}
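// Illustrative request using the range-faceting parameters defined above (parameter values
// are made up for this example, not taken from the Solr documentation):
//   facet=true&facet.range=price&facet.range.start=0&facet.range.end=100
//     &facet.range.gap=10&facet.range.other=before&facet.range.other=after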
/**
* An enumeration of the legal values for {@link #FACET_DATE_INCLUDE} and {@link
* #FACET_RANGE_INCLUDE} <br>
*
* <ul>
* <li>lower = all gap based ranges include their lower bound
* <li>upper = all gap based ranges include their upper bound
* <li>edge = the first and last gap ranges include their edge bounds (ie: lower for the first
* one, upper for the last one) even if the corresponding upper/lower option is not
* specified
* <li>outer = the BEFORE and AFTER ranges should be inclusive of their bounds, even if the
* first or last ranges already include those boundaries.
* <li>all = shorthand for lower, upper, edge, and outer
* </ul>
*
* @see #FACET_DATE_INCLUDE
* @see #FACET_RANGE_INCLUDE
*/
public enum FacetRangeInclude {
ALL,
LOWER,
UPPER,
EDGE,
OUTER;
@Override
public String toString() {
return super.toString().toLowerCase(Locale.ROOT);
}
public static FacetRangeInclude get(String label) {
try {
return valueOf(label.toUpperCase(Locale.ROOT));
} catch (IllegalArgumentException e) {
throw new SolrException(
SolrException.ErrorCode.BAD_REQUEST,
label + " is not a valid type of for range 'include' information",
e);
}
}
/**
* Convenience method for parsing the param value according to the correct semantics and
* applying the default of "LOWER"
*/
public static EnumSet<FacetRangeInclude> parseParam(final String[] param) {
// short circuit for default behavior
if (null == param || 0 == param.length) return EnumSet.of(LOWER);
// build up set containing whatever is specified
final EnumSet<FacetRangeInclude> include = EnumSet.noneOf(FacetRangeInclude.class);
for (final String o : param) {
include.add(FacetRangeInclude.get(o));
}
// if set contains all, then we're back to short circuiting
if (include.contains(FacetRangeInclude.ALL)) return EnumSet.allOf(FacetRangeInclude.class);
// use whatever we've got.
return include;
}
}
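// Illustrative example of the include rules above: with start=0, end=20, gap=10 and
// facet.range.include=lower, the generated ranges are [0,10) and [10,20); adding "edge"
// additionally closes the final range at its upper edge, i.e. [10,20].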
/**
* An enumeration of the legal values for {@link #FACET_RANGE_METHOD}
*
* <ul>
* <li>filter = for each generated range, build a filter and intersect it with the main result set
* <li>dv = iterate the doc values of the matching documents and assign each value to its range
* </ul>
*
* @see #FACET_RANGE_METHOD
*/
public enum FacetRangeMethod {
FILTER,
DV;
@Override
public String toString() {
return super.toString().toLowerCase(Locale.ROOT);
}
public static FacetRangeMethod get(String label) {
try {
return valueOf(label.toUpperCase(Locale.ROOT));
} catch (IllegalArgumentException e) {
throw new SolrException(
SolrException.ErrorCode.BAD_REQUEST,
label + " is not a valid method for range faceting",
e);
}
}
public static FacetRangeMethod getDefault() {
return FILTER;
}
}
}<|fim▁end|> | |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>use std::error::Error;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use std::path::Path;
use std::collections::HashMap;
extern crate regex;
use regex::Regex;
enum Ineq {
Equals(i32),
GreaterThan(i32),
LessThan(i32),
}
// --------------------------------------------------------
fn find_sue (constraints: &HashMap<&str, Ineq>, filename: &str) -> i32 {
let path = Path::new(filename);
let display = path.display();
// Open the path in read-only mode, returns `io::Result<File>`
let file = match File::open(&path) {
// The `description` method of `io::Error` returns a string that describes the error
Err(why) => panic!("couldn't open {}: {}", display, Error::description(&why)),
Ok(file) => file,
};
let reader = BufReader::new(file);
let lines = reader.lines();
let re = Regex::new(r"(?P<key>[:alpha:]+): (?P<value>\d+)").unwrap();
let mut sue_num = 0;
let mut sue_no_conflict = -1i32;
for line in lines {
let text = line.unwrap();
sue_num += 1;
let mut has_conflict = false;
for cap in re.captures_iter(&text) {
let key = cap.name("key").unwrap_or("");
let value = cap.name("value").unwrap_or("").parse::<i32>().unwrap();<|fim▁hole|> match constraints.get(&key) {
Some(&Ineq::Equals(present_value)) => {
if value != present_value {
has_conflict = true;
}
},
Some(&Ineq::GreaterThan(present_value)) => {
if value <= present_value {
has_conflict = true;
}
},
Some(&Ineq::LessThan(present_value)) => {
if value >= present_value {
has_conflict = true;
}
},
_ => {},
}
}
if !has_conflict {
println!("Sue {} has no conflicts", sue_num);
sue_no_conflict = sue_num;
}
}
sue_no_conflict
}
// --------------------------------------------------------
fn main() {
println!("Running part 1...");
let mut sue_stats_exact = HashMap::new();
sue_stats_exact.insert("children", Ineq::Equals(3) );
sue_stats_exact.insert("cats", Ineq::Equals(7) );
sue_stats_exact.insert("samoyeds", Ineq::Equals(2) );
sue_stats_exact.insert("pomeranians", Ineq::Equals(3) );
sue_stats_exact.insert("akitas", Ineq::Equals(0) );
sue_stats_exact.insert("vizslas", Ineq::Equals(0) );
sue_stats_exact.insert("goldfish", Ineq::Equals(5) );
sue_stats_exact.insert("trees", Ineq::Equals(3) );
sue_stats_exact.insert("cars", Ineq::Equals(2) );
sue_stats_exact.insert("perfumes", Ineq::Equals(1) );
find_sue(&sue_stats_exact, "day16.txt");
println!("Running part 2...");
let mut sue_stats_ineq = HashMap::new();
sue_stats_ineq.insert("children", Ineq::Equals(3) );
sue_stats_ineq.insert("cats", Ineq::GreaterThan(7) );
sue_stats_ineq.insert("samoyeds", Ineq::Equals(2) );
sue_stats_ineq.insert("pomeranians", Ineq::LessThan(3) );
sue_stats_ineq.insert("akitas", Ineq::Equals(0) );
sue_stats_ineq.insert("vizslas", Ineq::Equals(0) );
sue_stats_ineq.insert("goldfish", Ineq::LessThan(5) );
sue_stats_ineq.insert("trees", Ineq::GreaterThan(3) );
sue_stats_ineq.insert("cars", Ineq::Equals(2) );
sue_stats_ineq.insert("perfumes", Ineq::Equals(1) );
find_sue(&sue_stats_ineq, "day16.txt");
}<|fim▁end|> | |
<|file_name|>sync.py<|end_file_name|><|fim▁begin|>__copyright__ = """
Copyright (C) 2006, Catalin Marinas <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, os
import stgit.commands.common
from stgit.argparse import opt
from stgit.commands.common import *
from stgit.utils import *
from stgit.out import *
from stgit import argparse, stack, git
help = 'Synchronise patches with a branch or a series'
kind = 'patch'
usage = ['[options] [<patch1>] [<patch2>] [<patch3>..<patch4>]']
description = """
For each of the specified patches perform a three-way merge with the
same patch in the specified branch or series. The command can be used
for keeping patches on several branches in sync. Note that the
operation may fail for some patches because of conflicts. The patches
in the series must apply cleanly."""
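# Example invocations (branch, series path and patch names are illustrative):
#   stg sync --ref-branch other-branch patch1 patch3..patch4
#   stg sync --all --series ../other-tree/series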
args = [argparse.patch_range(argparse.applied_patches,
argparse.unapplied_patches)]
options = [
opt('-a', '--all', action = 'store_true',
short = 'Synchronise all the applied patches'),
opt('-B', '--ref-branch', args = [argparse.stg_branches],
short = 'Synchronise patches with BRANCH'),
opt('-s', '--series', args = [argparse.files],
short = 'Synchronise patches with SERIES')]
directory = DirectoryGotoToplevel(log = True)
def __check_all():
check_local_changes()
check_conflicts()
check_head_top_equal(crt_series)
def __branch_merge_patch(remote_series, pname):
"""Merge a patch from a remote branch into the current tree.
"""
patch = remote_series.get_patch(pname)
git.merge_recursive(patch.get_bottom(), git.get_head(), patch.get_top())
def __series_merge_patch(base, patchdir, pname):
"""Merge a patch file with the given StGIT patch.
"""
patchfile = os.path.join(patchdir, pname)
git.apply_patch(filename = patchfile, base = base)
def func(parser, options, args):
"""Synchronise a range of patches
"""
if options.ref_branch:
remote_series = stack.Series(options.ref_branch)
if options.ref_branch == crt_series.get_name():
raise CmdException, 'Cannot synchronise with the current branch'
remote_patches = remote_series.get_applied()
# the merge function merge_patch(patch, pname)
merge_patch = lambda patch, pname: \
__branch_merge_patch(remote_series, pname)
elif options.series:
patchdir = os.path.dirname(options.series)
remote_patches = []
f = file(options.series)
for line in f:
p = re.sub('#.*$', '', line).strip()
if not p:
continue
remote_patches.append(p)
f.close()
# the merge function merge_patch(patch, pname)
merge_patch = lambda patch, pname: \
__series_merge_patch(patch.get_bottom(), patchdir, pname)
else:
raise CmdException, 'No remote branch or series specified'
applied = crt_series.get_applied()
unapplied = crt_series.get_unapplied()
if options.all:
patches = applied
elif len(args) != 0:
patches = parse_patches(args, applied + unapplied, len(applied),
ordered = True)
elif applied:
patches = [crt_series.get_current()]
else:
parser.error('no patches applied')
if not patches:
raise CmdException, 'No patches to synchronise'
__check_all()
# only keep the patches to be synchronised
sync_patches = [p for p in patches if p in remote_patches]
if not sync_patches:
raise CmdException, 'No common patches to be synchronised'
# pop to the one before the first patch to be synchronised
first_patch = sync_patches[0]
if first_patch in applied:
to_pop = applied[applied.index(first_patch) + 1:]
if to_pop:
pop_patches(crt_series, to_pop[::-1])
pushed = [first_patch]
else:
to_pop = []
pushed = []
popped = to_pop + [p for p in patches if p in unapplied]
for p in pushed + popped:
if p in popped:
# push this patch
push_patches(crt_series, [p])
if p not in sync_patches:
# nothing to synchronise
continue
# the actual sync
out.start('Synchronising "%s"' % p)
patch = crt_series.get_patch(p)
bottom = patch.get_bottom()<|fim▁hole|> top = patch.get_top()
# reset the patch backup information.
patch.set_top(top, backup = True)
# the actual merging (either from a branch or an external file)
merge_patch(patch, p)
if git.local_changes(verbose = False):
# index (cache) already updated by the git merge. The
# backup information was already reset above
crt_series.refresh_patch(cache_update = False, backup = False,
log = 'sync')
out.done('updated')
else:
out.done()<|fim▁end|> | |
<|file_name|>oobe_ui.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/ui/webui/chromeos/login/oobe_ui.h"
#include "ash/ash_switches.h"
#include "base/command_line.h"
#include "base/logging.h"
#include "base/memory/ref_counted_memory.h"
#include "base/values.h"
#include "chrome/browser/browser_about_handler.h"
#include "chrome/browser/chrome_notification_types.h"
#include "chrome/browser/chromeos/kiosk_mode/kiosk_mode_settings.h"
#include "chrome/browser/chromeos/login/enrollment/enrollment_screen_actor.h"
#include "chrome/browser/chromeos/login/login_display_host_impl.h"
#include "chrome/browser/chromeos/login/screen_locker.h"
#include "chrome/browser/chromeos/login/user_manager.h"
#include "chrome/browser/chromeos/login/wizard_controller.h"
#include "chrome/browser/chromeos/system/input_device_settings.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/browser/ui/webui/about_ui.h"
#include "chrome/browser/ui/webui/chromeos/login/app_launch_splash_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/base_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/enrollment_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/error_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/eula_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/gaia_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/kiosk_app_menu_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/kiosk_autolaunch_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/kiosk_enable_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/locally_managed_user_creation_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/network_dropdown_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/network_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/network_state_informer.h"
#include "chrome/browser/ui/webui/chromeos/login/reset_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/signin_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/terms_of_service_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/update_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/user_image_screen_handler.h"
#include "chrome/browser/ui/webui/chromeos/login/wrong_hwid_screen_handler.h"
#include "chrome/browser/ui/webui/options/chromeos/user_image_source.h"
#include "chrome/browser/ui/webui/theme_source.h"
#include "chrome/common/chrome_constants.h"
#include "chrome/common/url_constants.h"
#include "chromeos/chromeos_constants.h"
#include "chromeos/chromeos_switches.h"
#include "content/public/browser/web_ui.h"
#include "content/public/browser/web_ui_data_source.h"
#include "grit/browser_resources.h"
#include "ui/base/resource/resource_bundle.h"
#include "ui/base/webui/web_ui_util.h"
namespace chromeos {
namespace {
// List of known types of OobeUI. Type added as path in chrome://oobe url, for
// example chrome://oobe/user-adding.
const char kOobeDisplay[] = "oobe";
const char kLoginDisplay[] = "login";
const char kLockDisplay[] = "lock";
const char kUserAddingDisplay[] = "user-adding";
const char kAppLaunchSplashDisplay[] = "app-launch-splash";
const char* kKnownDisplayTypes[] = {
kOobeDisplay,
kLoginDisplay,
kLockDisplay,
kUserAddingDisplay,
kAppLaunchSplashDisplay
};
const char kStringsJSPath[] = "strings.js";
const char kLoginJSPath[] = "login.js";
const char kOobeJSPath[] = "oobe.js";
const char kKeyboardUtilsJSPath[] = "keyboard_utils.js";
const char kDemoUserLoginJSPath[] = "demo_user_login.js";
// Paths for deferred resource loading.
const char kEnrollmentHTMLPath[] = "enrollment.html";
const char kEnrollmentCSSPath[] = "enrollment.css";
const char kEnrollmentJSPath[] = "enrollment.js";
// Creates a WebUIDataSource for chrome://oobe
content::WebUIDataSource* CreateOobeUIDataSource(
const base::DictionaryValue& localized_strings,
const std::string& display_type) {
content::WebUIDataSource* source =
content::WebUIDataSource::Create(chrome::kChromeUIOobeHost);
source->SetUseJsonJSFormatV2();
source->AddLocalizedStrings(localized_strings);
source->SetJsonPath(kStringsJSPath);
if (chromeos::KioskModeSettings::Get()->IsKioskModeEnabled()) {
source->SetDefaultResource(IDR_DEMO_USER_LOGIN_HTML);
source->AddResourcePath(kDemoUserLoginJSPath, IDR_DEMO_USER_LOGIN_JS);
return source;
}
if (display_type == kOobeDisplay) {
source->SetDefaultResource(IDR_OOBE_HTML);
source->AddResourcePath(kOobeJSPath, IDR_OOBE_JS);
} else {
source->SetDefaultResource(IDR_LOGIN_HTML);
source->AddResourcePath(kLoginJSPath, IDR_LOGIN_JS);
}
source->AddResourcePath(kKeyboardUtilsJSPath, IDR_KEYBOARD_UTILS_JS);
source->OverrideContentSecurityPolicyFrameSrc(
"frame-src chrome://terms/ "
"chrome-extension://mfffpogegjflfpflabcdkioaeobkgjik/;");
// Serve deferred resources.
source->AddResourcePath(kEnrollmentHTMLPath, IDR_OOBE_ENROLLMENT_HTML);
source->AddResourcePath(kEnrollmentCSSPath, IDR_OOBE_ENROLLMENT_CSS);
source->AddResourcePath(kEnrollmentJSPath, IDR_OOBE_ENROLLMENT_JS);
return source;
}
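// Extracts the display type from a chrome://oobe URL path, falling back to
// the login display when the path is not one of the known display types.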
std::string GetDisplayType(const GURL& url) {
std::string path = url.path().size() ? url.path().substr(1) : "";
if (std::find(kKnownDisplayTypes,
kKnownDisplayTypes + arraysize(kKnownDisplayTypes),
path) == kKnownDisplayTypes + arraysize(kKnownDisplayTypes)) {
LOG(ERROR) << "Unknown display type '" << path << "'. Setting default.";
return kLoginDisplay;
}
return path;
}
} // namespace
// static
const char OobeUI::kScreenOobeNetwork[] = "connect";
const char OobeUI::kScreenOobeEula[] = "eula";
const char OobeUI::kScreenOobeUpdate[] = "update";
const char OobeUI::kScreenOobeEnrollment[] = "oauth-enrollment";
const char OobeUI::kScreenGaiaSignin[] = "gaia-signin";
const char OobeUI::kScreenAccountPicker[] = "account-picker";
const char OobeUI::kScreenKioskAutolaunch[] = "autolaunch";
const char OobeUI::kScreenKioskEnable[] = "kiosk-enable";
const char OobeUI::kScreenErrorMessage[] = "error-message";
const char OobeUI::kScreenUserImagePicker[] = "user-image";
const char OobeUI::kScreenTpmError[] = "tpm-error-message";
const char OobeUI::kScreenPasswordChanged[] = "password-changed";
const char OobeUI::kScreenManagedUserCreationFlow[]
= "managed-user-creation";
const char OobeUI::kScreenTermsOfService[] = "terms-of-service";
const char OobeUI::kScreenWrongHWID[] = "wrong-hwid";
const char OobeUI::kScreenAppLaunchSplash[] = "app-launch-splash";
const char OobeUI::kScreenConfirmPassword[] = "confirm-password";
const char OobeUI::kScreenMessageBox[] = "message-box";
OobeUI::OobeUI(content::WebUI* web_ui, const GURL& url)
: WebUIController(web_ui),
core_handler_(NULL),
network_dropdown_handler_(NULL),
update_screen_handler_(NULL),
network_screen_actor_(NULL),
eula_screen_actor_(NULL),
reset_screen_actor_(NULL),
autolaunch_screen_actor_(NULL),
kiosk_enable_screen_actor_(NULL),
wrong_hwid_screen_actor_(NULL),
locally_managed_user_creation_screen_actor_(NULL),
error_screen_handler_(NULL),
signin_screen_handler_(NULL),
terms_of_service_screen_actor_(NULL),
user_image_screen_actor_(NULL),
kiosk_app_menu_handler_(NULL),
current_screen_(SCREEN_UNKNOWN),
ready_(false) {
display_type_ = GetDisplayType(url);
InitializeScreenMaps();
network_state_informer_ = new NetworkStateInformer();
network_state_informer_->Init();
core_handler_ = new CoreOobeHandler(this);
AddScreenHandler(core_handler_);
core_handler_->SetDelegate(this);
network_dropdown_handler_ = new NetworkDropdownHandler();
AddScreenHandler(network_dropdown_handler_);
update_screen_handler_ = new UpdateScreenHandler();
AddScreenHandler(update_screen_handler_);
network_dropdown_handler_->AddObserver(update_screen_handler_);
if (display_type_ == kOobeDisplay) {
NetworkScreenHandler* network_screen_handler =
new NetworkScreenHandler(core_handler_);
network_screen_actor_ = network_screen_handler;
AddScreenHandler(network_screen_handler);
}
EulaScreenHandler* eula_screen_handler = new EulaScreenHandler(core_handler_);
eula_screen_actor_ = eula_screen_handler;
AddScreenHandler(eula_screen_handler);
ResetScreenHandler* reset_screen_handler = new ResetScreenHandler();
reset_screen_actor_ = reset_screen_handler;
AddScreenHandler(reset_screen_handler);
KioskAutolaunchScreenHandler* autolaunch_screen_handler =
new KioskAutolaunchScreenHandler();
autolaunch_screen_actor_ = autolaunch_screen_handler;
AddScreenHandler(autolaunch_screen_handler);
KioskEnableScreenHandler* kiosk_enable_screen_handler =
new KioskEnableScreenHandler();
kiosk_enable_screen_actor_ = kiosk_enable_screen_handler;
AddScreenHandler(kiosk_enable_screen_handler);
LocallyManagedUserCreationScreenHandler*
locally_managed_user_creation_screen_handler =
new LocallyManagedUserCreationScreenHandler();
locally_managed_user_creation_screen_actor_ =
locally_managed_user_creation_screen_handler;
AddScreenHandler(locally_managed_user_creation_screen_handler);
WrongHWIDScreenHandler* wrong_hwid_screen_handler =
new WrongHWIDScreenHandler();
wrong_hwid_screen_actor_ = wrong_hwid_screen_handler;
AddScreenHandler(wrong_hwid_screen_handler);
EnrollmentScreenHandler* enrollment_screen_handler =
new EnrollmentScreenHandler();
enrollment_screen_actor_ = enrollment_screen_handler;
AddScreenHandler(enrollment_screen_handler);
TermsOfServiceScreenHandler* terms_of_service_screen_handler =
new TermsOfServiceScreenHandler;
terms_of_service_screen_actor_ = terms_of_service_screen_handler;
AddScreenHandler(terms_of_service_screen_handler);
UserImageScreenHandler* user_image_screen_handler =
new UserImageScreenHandler();
user_image_screen_actor_ = user_image_screen_handler;
AddScreenHandler(user_image_screen_handler);
error_screen_handler_ = new ErrorScreenHandler(network_state_informer_);
AddScreenHandler(error_screen_handler_);
gaia_screen_handler_ = new GaiaScreenHandler(network_state_informer_);
AddScreenHandler(gaia_screen_handler_);
signin_screen_handler_ = new SigninScreenHandler(network_state_informer_,
error_screen_handler_,
core_handler_,
gaia_screen_handler_);
AddScreenHandler(signin_screen_handler_);
AppLaunchSplashScreenHandler* app_launch_splash_screen_handler =
new AppLaunchSplashScreenHandler(network_state_informer_,
error_screen_handler_);
AddScreenHandler(app_launch_splash_screen_handler);
app_launch_splash_screen_actor_ = app_launch_splash_screen_handler;
// Initialize KioskAppMenuHandler. Note that it is NOT a screen handler.
kiosk_app_menu_handler_ = new KioskAppMenuHandler;
web_ui->AddMessageHandler(kiosk_app_menu_handler_);
base::DictionaryValue localized_strings;
GetLocalizedStrings(&localized_strings);
Profile* profile = Profile::FromWebUI(web_ui);
// Set up the chrome://theme/ source, for Chrome logo.<|fim▁hole|> // Set up the chrome://terms/ data source, for EULA content.
AboutUIHTMLSource* about_source =
new AboutUIHTMLSource(chrome::kChromeUITermsHost, profile);
content::URLDataSource::Add(profile, about_source);
// Set up the chrome://oobe/ source.
content::WebUIDataSource::Add(profile,
CreateOobeUIDataSource(localized_strings,
display_type_));
// Set up the chrome://userimage/ source.
options::UserImageSource* user_image_source =
new options::UserImageSource();
content::URLDataSource::Add(profile, user_image_source);
}
OobeUI::~OobeUI() {
core_handler_->SetDelegate(NULL);
network_dropdown_handler_->RemoveObserver(update_screen_handler_);
}
void OobeUI::ShowScreen(WizardScreen* screen) {
screen->Show();
}
void OobeUI::HideScreen(WizardScreen* screen) {
screen->Hide();
}
UpdateScreenActor* OobeUI::GetUpdateScreenActor() {
return update_screen_handler_;
}
NetworkScreenActor* OobeUI::GetNetworkScreenActor() {
return network_screen_actor_;
}
EulaScreenActor* OobeUI::GetEulaScreenActor() {
return eula_screen_actor_;
}
EnrollmentScreenActor* OobeUI::GetEnrollmentScreenActor() {
return enrollment_screen_actor_;
}
ResetScreenActor* OobeUI::GetResetScreenActor() {
return reset_screen_actor_;
}
KioskAutolaunchScreenActor* OobeUI::GetKioskAutolaunchScreenActor() {
return autolaunch_screen_actor_;
}
KioskEnableScreenActor* OobeUI::GetKioskEnableScreenActor() {
return kiosk_enable_screen_actor_;
}
TermsOfServiceScreenActor* OobeUI::GetTermsOfServiceScreenActor() {
return terms_of_service_screen_actor_;
}
WrongHWIDScreenActor* OobeUI::GetWrongHWIDScreenActor() {
return wrong_hwid_screen_actor_;
}
UserImageScreenActor* OobeUI::GetUserImageScreenActor() {
return user_image_screen_actor_;
}
ErrorScreenActor* OobeUI::GetErrorScreenActor() {
return error_screen_handler_;
}
LocallyManagedUserCreationScreenHandler*
OobeUI::GetLocallyManagedUserCreationScreenActor() {
return locally_managed_user_creation_screen_actor_;
}
AppLaunchSplashScreenActor*
OobeUI::GetAppLaunchSplashScreenActor() {
return app_launch_splash_screen_actor_;
}
void OobeUI::GetLocalizedStrings(base::DictionaryValue* localized_strings) {
// Note, handlers_[0] is a GenericHandler used by the WebUI.
for (size_t i = 0; i < handlers_.size(); ++i)
handlers_[i]->GetLocalizedStrings(localized_strings);
webui::SetFontAndTextDirection(localized_strings);
kiosk_app_menu_handler_->GetLocalizedStrings(localized_strings);
#if defined(GOOGLE_CHROME_BUILD)
localized_strings->SetString("buildType", "chrome");
#else
localized_strings->SetString("buildType", "chromium");
#endif
// If we're not doing boot animation then WebUI should trigger
// wallpaper load on boot.
if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kDisableBootAnimation)) {
localized_strings->SetString("bootIntoWallpaper", "on");
} else {
localized_strings->SetString("bootIntoWallpaper", "off");
}
bool keyboard_driven_oobe =
system::keyboard_settings::ForceKeyboardDrivenUINavigation();
localized_strings->SetString("highlightStrength",
keyboard_driven_oobe ? "strong" : "normal");
}
void OobeUI::InitializeScreenMaps() {
screen_names_.resize(SCREEN_UNKNOWN);
screen_names_[SCREEN_OOBE_NETWORK] = kScreenOobeNetwork;
screen_names_[SCREEN_OOBE_EULA] = kScreenOobeEula;
screen_names_[SCREEN_OOBE_UPDATE] = kScreenOobeUpdate;
screen_names_[SCREEN_OOBE_ENROLLMENT] = kScreenOobeEnrollment;
screen_names_[SCREEN_GAIA_SIGNIN] = kScreenGaiaSignin;
screen_names_[SCREEN_ACCOUNT_PICKER] = kScreenAccountPicker;
screen_names_[SCREEN_KIOSK_AUTOLAUNCH] = kScreenKioskAutolaunch;
screen_names_[SCREEN_KIOSK_ENABLE] = kScreenKioskEnable;
screen_names_[SCREEN_ERROR_MESSAGE] = kScreenErrorMessage;
screen_names_[SCREEN_USER_IMAGE_PICKER] = kScreenUserImagePicker;
screen_names_[SCREEN_TPM_ERROR] = kScreenTpmError;
screen_names_[SCREEN_PASSWORD_CHANGED] = kScreenPasswordChanged;
screen_names_[SCREEN_CREATE_MANAGED_USER_FLOW] =
kScreenManagedUserCreationFlow;
screen_names_[SCREEN_TERMS_OF_SERVICE] = kScreenTermsOfService;
screen_names_[SCREEN_WRONG_HWID] = kScreenWrongHWID;
screen_names_[SCREEN_APP_LAUNCH_SPLASH] = kScreenAppLaunchSplash;
screen_names_[SCREEN_CONFIRM_PASSWORD] = kScreenConfirmPassword;
screen_names_[SCREEN_MESSAGE_BOX] = kScreenMessageBox;
screen_ids_.clear();
for (size_t i = 0; i < screen_names_.size(); ++i)
screen_ids_[screen_names_[i]] = static_cast<Screen>(i);
}
void OobeUI::AddScreenHandler(BaseScreenHandler* handler) {
web_ui()->AddMessageHandler(handler);
handlers_.push_back(handler);
}
void OobeUI::InitializeHandlers() {
ready_ = true;
for (size_t i = 0; i < ready_callbacks_.size(); ++i)
ready_callbacks_[i].Run();
ready_callbacks_.clear();
// Notify 'initialize' for synchronously loaded screens.
for (size_t i = 0; i < handlers_.size(); ++i) {
if (handlers_[i]->async_assets_load_id().empty())
handlers_[i]->InitializeBase();
}
}
void OobeUI::OnScreenAssetsLoaded(const std::string& async_assets_load_id) {
DCHECK(!async_assets_load_id.empty());
for (size_t i = 0; i < handlers_.size(); ++i) {
if (handlers_[i]->async_assets_load_id() == async_assets_load_id)
handlers_[i]->InitializeBase();
}
}
bool OobeUI::IsJSReady(const base::Closure& display_is_ready_callback) {
if (!ready_)
ready_callbacks_.push_back(display_is_ready_callback);
return ready_;
}
void OobeUI::ShowOobeUI(bool show) {
core_handler_->ShowOobeUI(show);
}
void OobeUI::ShowRetailModeLoginSpinner() {
signin_screen_handler_->ShowRetailModeLoginSpinner();
}
void OobeUI::ShowSigninScreen(const LoginScreenContext& context,
SigninScreenHandlerDelegate* delegate,
NativeWindowDelegate* native_window_delegate) {
signin_screen_handler_->SetDelegate(delegate);
signin_screen_handler_->SetNativeWindowDelegate(native_window_delegate);
LoginScreenContext actual_context(context);
actual_context.set_oobe_ui(core_handler_->show_oobe_ui());
signin_screen_handler_->Show(actual_context);
}
void OobeUI::ResetSigninScreenHandlerDelegate() {
signin_screen_handler_->SetDelegate(NULL);
signin_screen_handler_->SetNativeWindowDelegate(NULL);
}
void OobeUI::AddObserver(Observer* observer) {
observer_list_.AddObserver(observer);
}
void OobeUI::RemoveObserver(Observer* observer) {
observer_list_.RemoveObserver(observer);
}
const std::string& OobeUI::GetScreenName(Screen screen) const {
DCHECK(screen >= 0 && screen < SCREEN_UNKNOWN);
return screen_names_[static_cast<size_t>(screen)];
}
void OobeUI::OnCurrentScreenChanged(const std::string& screen) {
if (screen_ids_.count(screen)) {
Screen new_screen = screen_ids_[screen];
FOR_EACH_OBSERVER(Observer,
observer_list_,
OnCurrentScreenChanged(current_screen_, new_screen));
current_screen_ = new_screen;
} else {
NOTREACHED() << "Screen should be registered in InitializeScreenMaps()";
current_screen_ = SCREEN_UNKNOWN;
}
}
} // namespace chromeos<|fim▁end|> | ThemeSource* theme = new ThemeSource(profile);
content::URLDataSource::Add(profile, theme);
|
<|file_name|>area_frame_allocator.rs<|end_file_name|><|fim▁begin|>use core::ops::Range;
use mem::{Frame, FrameAllocator};
use multiboot2::{MemoryArea, MemoryAreaIter};
pub struct AreaFrameAllocator {
area: Option<&'static MemoryArea>,
areas: MemoryAreaIter,
next: Frame,
kernel: Range<Frame>,
multiboot: Range<Frame>,
}
impl FrameAllocator for AreaFrameAllocator {
fn alloc(&mut self) -> Option<Frame> {
if let Some(area) = self.area {
let frame = Frame { number: self.next.number };<|fim▁hole|> Frame::containing(last_address as usize)
};
if frame > last_frame {
self.select_next_area();
return self.alloc();
} else {
self.next.number += 1;
return Some(frame);
}
}
None
}
fn free(&mut self, _frame: Frame) {
unimplemented!()
}
}
impl AreaFrameAllocator {
pub fn new(kernel_start: usize,
kernel_end: usize,
multiboot_start: usize,
multiboot_end: usize,
memory_areas: MemoryAreaIter)
-> AreaFrameAllocator {
let mut allocator = AreaFrameAllocator {
area: None,
areas: memory_areas,
next: Frame::containing(0),
kernel: Range {
start: Frame::containing(kernel_start),
end: Frame::containing(kernel_end),
},
multiboot: Range {
start: Frame::containing(multiboot_start),
end: Frame::containing(multiboot_end),
},
};
allocator.select_next_area();
allocator
}
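// Selects the next memory area to allocate from: the area with the lowest
// base address whose last frame lies at or beyond `next` and outside the
// kernel and multiboot ranges, then advances `next` to that area's first
// frame if necessary.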
fn select_next_area(&mut self) {
self.area = self.areas
.clone()
.filter(|area| {
let last_address = area.base_addr + area.length - 1;
let frame = Frame::containing(last_address as usize);
frame >= self.next &&
!(self.kernel.start.number..(self.kernel.end.number)).contains(frame.number) &&
!(self.multiboot.start.number..(self.multiboot.end.number))
.contains(frame.number)
})
.min_by_key(|area| area.base_addr);
if let Some(area) = self.area {
let first_frame = Frame::containing(area.base_addr as usize);
if self.next < first_frame {
self.next = first_frame;
}
}
}
}<|fim▁end|> |
let last_frame = {
let last_address = area.base_addr + area.length - 1; |
<|file_name|>htmlcanvaselement.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use canvas_traits::{CanvasMsg, FromLayoutMsg, CanvasData};
use dom::attr::Attr;
use dom::attr::AttrValue;
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::CanvasRenderingContext2DBinding::CanvasRenderingContext2DMethods;
use dom::bindings::codegen::Bindings::HTMLCanvasElementBinding;
use dom::bindings::codegen::Bindings::HTMLCanvasElementBinding::HTMLCanvasElementMethods;
use dom::bindings::codegen::Bindings::WebGLRenderingContextBinding::WebGLContextAttributes;
use dom::bindings::codegen::UnionTypes::CanvasRenderingContext2DOrWebGLRenderingContext;
use dom::bindings::error::{Error, Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{HeapGCValue, JS, LayoutJS, Root};
use dom::bindings::num::Finite;
use dom::bindings::reflector::Reflectable;
use dom::canvasrenderingcontext2d::{CanvasRenderingContext2D, LayoutCanvasRenderingContext2DHelpers};
use dom::document::Document;
use dom::element::{AttributeMutation, Element, RawLayoutElementHelpers};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use dom::webglrenderingcontext::{LayoutCanvasWebGLRenderingContextHelpers, WebGLRenderingContext};
use euclid::size::Size2D;
use image::ColorType;
use image::png::PNGEncoder;
use ipc_channel::ipc::{self, IpcSender};
use js::jsapi::{HandleValue, JSContext};
use offscreen_gl_context::GLContextAttributes;
use rustc_serialize::base64::{STANDARD, ToBase64};
use std::iter::repeat;
use string_cache::Atom;
use util::str::DOMString;
const DEFAULT_WIDTH: u32 = 300;
const DEFAULT_HEIGHT: u32 = 150;
#[must_root]
#[derive(JSTraceable, Clone, HeapSizeOf)]
pub enum CanvasContext {
Context2d(JS<CanvasRenderingContext2D>),
WebGL(JS<WebGLRenderingContext>),
}
impl HeapGCValue for CanvasContext {}
#[dom_struct]
pub struct HTMLCanvasElement {
htmlelement: HTMLElement,
context: DOMRefCell<Option<CanvasContext>>,
}
impl HTMLCanvasElement {
fn new_inherited(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> HTMLCanvasElement {
HTMLCanvasElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document),
context: DOMRefCell::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLCanvasElement> {
let element = HTMLCanvasElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLCanvasElementBinding::Wrap)
}
fn recreate_contexts(&self) {
let size = self.get_size();
if let Some(ref context) = *self.context.borrow() {
match *context {
CanvasContext::Context2d(ref context) => context.set_bitmap_dimensions(size),
CanvasContext::WebGL(ref context) => context.recreate(size),
}
}
}
pub fn get_size(&self) -> Size2D<i32> {
Size2D::new(self.Width() as i32, self.Height() as i32)
}
pub fn origin_is_clean(&self) -> bool {
match *self.context.borrow() {
Some(CanvasContext::Context2d(ref context)) => context.origin_is_clean(),
_ => true,
}
}
}
pub struct HTMLCanvasData {
pub ipc_renderer: Option<IpcSender<CanvasMsg>>,
pub width: u32,
pub height: u32,
}
pub trait LayoutHTMLCanvasElementHelpers {
fn data(&self) -> HTMLCanvasData;
}
impl LayoutHTMLCanvasElementHelpers for LayoutJS<HTMLCanvasElement> {
#[allow(unsafe_code)]
fn data(&self) -> HTMLCanvasData {
unsafe {
let canvas = &*self.unsafe_get();
let ipc_renderer = canvas.context.borrow_for_layout().as_ref().map(|context| {
match *context {
CanvasContext::Context2d(ref context) => {
context.to_layout().get_ipc_renderer()
},
CanvasContext::WebGL(ref context) => {
context.to_layout().get_ipc_renderer()
},
}
});
let width_attr = canvas.upcast::<Element>().get_attr_for_layout(&ns!(), &atom!("width"));
let height_attr = canvas.upcast::<Element>().get_attr_for_layout(&ns!(), &atom!("height"));
HTMLCanvasData {
ipc_renderer: ipc_renderer,
width: width_attr.map_or(DEFAULT_WIDTH, |val| val.as_uint()),
height: height_attr.map_or(DEFAULT_HEIGHT, |val| val.as_uint()),
}
}
}
}
impl HTMLCanvasElement {
pub fn ipc_renderer(&self) -> Option<IpcSender<CanvasMsg>> {
self.context.borrow().as_ref().map(|context| {
match *context {
CanvasContext::Context2d(ref context) => context.ipc_renderer(),
CanvasContext::WebGL(ref context) => context.ipc_renderer(),
}
})
}
pub fn get_or_init_2d_context(&self) -> Option<Root<CanvasRenderingContext2D>> {
if self.context.borrow().is_none() {
let window = window_from_node(self);
let size = self.get_size();
let context = CanvasRenderingContext2D::new(GlobalRef::Window(window.r()), self, size);
*self.context.borrow_mut() = Some(CanvasContext::Context2d(JS::from_rooted(&context)));
}
match *self.context.borrow().as_ref().unwrap() {
CanvasContext::Context2d(ref context) => Some(Root::from_ref(&*context)),
_ => None,
}
}
#[allow(unsafe_code)]
pub fn get_or_init_webgl_context(&self,
cx: *mut JSContext,
attrs: Option<HandleValue>) -> Option<Root<WebGLRenderingContext>> {
if self.context.borrow().is_none() {
let window = window_from_node(self);
let size = self.get_size();
let attrs = if let Some(webgl_attributes) = attrs {
if let Ok(ref attrs) = unsafe { WebGLContextAttributes::new(cx, webgl_attributes) } {
From::from(attrs)
} else {
debug!("Unexpected error on conversion of WebGLContextAttributes");
return None;
}
} else {
GLContextAttributes::default()
};
let maybe_ctx = WebGLRenderingContext::new(GlobalRef::Window(window.r()), self, size, attrs);
*self.context.borrow_mut() = maybe_ctx.map( |ctx| CanvasContext::WebGL(JS::from_rooted(&ctx)));
}
if let Some(CanvasContext::WebGL(ref context)) = *self.context.borrow() {
Some(Root::from_ref(&*context))
} else {
None
}
}
pub fn is_valid(&self) -> bool {
self.Height() != 0 && self.Width() != 0
}
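// Fetches the raw RGBA pixel data of the canvas together with its size.
// Returns None for zero-sized canvases (and for WebGL contexts); when no
// context has been created yet, an opaque white buffer is returned instead.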
pub fn fetch_all_data(&self) -> Option<(Vec<u8>, Size2D<i32>)> {
let size = self.get_size();
if size.width == 0 || size.height == 0 {
return None
}
let data = if let Some(renderer) = self.ipc_renderer() {
let (sender, receiver) = ipc::channel().unwrap();
let msg = CanvasMsg::FromLayout(FromLayoutMsg::SendData(sender));
renderer.send(msg).unwrap();
match receiver.recv().unwrap() {
CanvasData::Pixels(pixel_data)
=> pixel_data.image_data.to_vec(),
CanvasData::WebGL(_)
// TODO(ecoal95): Not sure if WebGL canvas is required for 2d spec,
// but I think it's not.
=> return None,
}
} else {
repeat(0xffu8).take((size.height as usize) * (size.width as usize) * 4).collect()
};
Some((data, size))
}
}
impl HTMLCanvasElementMethods for HTMLCanvasElement {
// https://html.spec.whatwg.org/multipage/#dom-canvas-width
make_uint_getter!(Width, "width", DEFAULT_WIDTH);
// https://html.spec.whatwg.org/multipage/#dom-canvas-width
make_uint_setter!(SetWidth, "width", DEFAULT_WIDTH);
// https://html.spec.whatwg.org/multipage/#dom-canvas-height
make_uint_getter!(Height, "height", DEFAULT_HEIGHT);
// https://html.spec.whatwg.org/multipage/#dom-canvas-height
make_uint_setter!(SetHeight, "height", DEFAULT_HEIGHT);
// https://html.spec.whatwg.org/multipage/#dom-canvas-getcontext
fn GetContext(&self,
cx: *mut JSContext,
id: DOMString,
attributes: Vec<HandleValue>)
-> Option<CanvasRenderingContext2DOrWebGLRenderingContext> {
match &*id {
"2d" => {
self.get_or_init_2d_context()
.map(CanvasRenderingContext2DOrWebGLRenderingContext::CanvasRenderingContext2D)
}
"webgl" | "experimental-webgl" => {
self.get_or_init_webgl_context(cx, attributes.get(0).cloned())
.map(CanvasRenderingContext2DOrWebGLRenderingContext::WebGLRenderingContext)
}
_ => None
}
}
// https://html.spec.whatwg.org/multipage/#dom-canvas-todataurl
fn ToDataURL(&self,
_context: *mut JSContext,
_mime_type: Option<DOMString>,
_arguments: Vec<HandleValue>) -> Fallible<DOMString> {
// Step 1.
if let Some(CanvasContext::Context2d(ref context)) = *self.context.borrow() {
if !context.origin_is_clean() {
return Err(Error::Security);
}
}
// Step 2.
if self.Width() == 0 || self.Height() == 0 {
return Ok(DOMString::from("data:,"));
}
// Step 3.
let raw_data = match *self.context.borrow() {
Some(CanvasContext::Context2d(ref context)) => {
let window = window_from_node(self);
let image_data = try!(context.GetImageData(Finite::wrap(0f64), Finite::wrap(0f64),
Finite::wrap(self.Width() as f64),
Finite::wrap(self.Height() as f64)));
image_data.get_data_array(&GlobalRef::Window(window.r()))
}
None => {
// Each pixel is fully-transparent black.
vec![0; (self.Width() * self.Height() * 4) as usize]
}
_ => return Err(Error::NotSupported) // WebGL
};
// Only handle image/png for now.
let mime_type = "image/png";
let mut encoded = Vec::new();
{<|fim▁hole|>
let encoded = encoded.to_base64(STANDARD);
Ok(DOMString::from(format!("data:{};base64,{}", mime_type, encoded)))
}
}
impl VirtualMethods for HTMLCanvasElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
match attr.local_name() {
&atom!("width") | &atom!("height") => self.recreate_contexts(),
_ => (),
};
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("width") => AttrValue::from_u32(value, DEFAULT_WIDTH),
&atom!("height") => AttrValue::from_u32(value, DEFAULT_HEIGHT),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
impl<'a> From<&'a WebGLContextAttributes> for GLContextAttributes {
fn from(attrs: &'a WebGLContextAttributes) -> GLContextAttributes {
GLContextAttributes {
alpha: attrs.alpha,
depth: attrs.depth,
stencil: attrs.stencil,
antialias: attrs.antialias,
premultiplied_alpha: attrs.premultipliedAlpha,
preserve_drawing_buffer: attrs.preserveDrawingBuffer,
}
}
}
pub mod utils {
use dom::window::Window;
use ipc_channel::ipc;
use net_traits::image_cache_thread::{ImageCacheChan, ImageResponse};
use url::Url;
pub fn request_image_from_cache(window: &Window, url: Url) -> ImageResponse {
let image_cache = window.image_cache_thread();
let (response_chan, response_port) = ipc::channel().unwrap();
image_cache.request_image(url, ImageCacheChan(response_chan), None);
let result = response_port.recv().unwrap();
result.image_response
}
}<|fim▁end|> | let encoder: PNGEncoder<&mut Vec<u8>> = PNGEncoder::new(&mut encoded);
encoder.encode(&raw_data, self.Width(), self.Height(), ColorType::RGBA(8)).unwrap();
} |
<|file_name|>sandbox.js<|end_file_name|><|fim▁begin|>'use es6';
const dotenv = require('dotenv');
const Lyft = require('./build/index');
dotenv.load();
const lyft = new Lyft(process.env.LYFT_CLIENT_ID, process.env.LYFT_CLIENT_SECRET);
const query = {
start: {
latitude: 37.7833,
longitude: -122.4167,
},
end: {
latitude: 37.7922,
longitude: -122.4012,<|fim▁hole|>};
lyft.getRideEstimates(query)
.then((result) => {
console.log(result);
})
.catch((error) => {
console.log(error);
});<|fim▁end|> | },
rideType: 'lyft_line', |
<|file_name|>test_SLPn.py<|end_file_name|><|fim▁begin|>import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from matplotlib import path
from quadr import quadr
from LapSLPmatrix import LapSLPmatrix
def test_SLPn():
# test data dir
import os
dir_path = os.path.dirname(os.path.realpath(__file__)) + "/TestData/"
circle = sio.loadmat(dir_path+'circle.mat')
x = circle['x']
N, M = x.shape
# set up source
s = {}
for l in range(0,M):
s_temp = {}
s_temp['x'] = x[:,l][:,np.newaxis]
s_temp = quadr(s_temp,N)
s[str(l)] = s_temp
# set up target
nx = 100
gx = np.arange(1,nx+1)/nx
ny = 100
gy = np.arange(1,ny+1)/ny # set up plotting
xx, yy = np.meshgrid(gx,gy)
zz = xx + 1j*yy
t = {}
ii = np.ones((nx*ny, ), dtype=bool)
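# keep only the grid points that lie outside every source circle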
for l in range(0,M):
s_temp = s[str(l)]
p = path.Path(np.vstack((np.real(s_temp['x']).T,np.imag(s_temp['x']).T)).T)
ii = (~p.contains_points(np.vstack((np.real(zz).flatten('F'),np.imag(zz).flatten('F'))).T))&ii
t['x'] = zz.flatten('F')[ii][np.newaxis].T
# multipole evaluation
u = 0*(1+1j)*zz
idx = ii.reshape(ny,nx,order='F')
for l in range(0,M):
s_temp = s[str(l)]
A = LapSLPmatrix(t,s_temp,0)
tau = np.sin(2*np.pi*np.real(s_temp['x'])) + np.cos(np.pi*np.imag(s_temp['x']))
u_temp = A.dot(tau)
u.T[idx.T] = u.T[idx.T] + u_temp.flatten()
if np.mod(l,25) == 0:
fig = plt.figure()
logerr = plt.imshow(np.real(u),aspect=nx/ny, interpolation='none')
fig.colorbar(logerr)
plt.grid(True)
plt.show()
fig = plt.figure()
logerr = plt.imshow(np.real(u),aspect=nx/ny, interpolation='none')
fig.colorbar(logerr)
plt.grid(True)
plt.show()
<|fim▁hole|><|fim▁end|> |
if __name__ == '__main__':
test_SLPn() |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>//! Definitions for the ESP-IDF framework.<|fim▁hole|>#![stable(feature = "raw_ext", since = "1.1.0")]
pub mod fs;
pub mod raw;<|fim▁end|> | |
<|file_name|>cgmath_augment.rs<|end_file_name|><|fim▁begin|>use cgmath::{Point2, Vector2};
pub trait Cross<T, S> {
fn cross(&self, other: &T) -> S;
}
pub trait Dot<T, S> {
fn dot(&self, other: &T) -> S;
}
//stupid type rules won't let me add std::ops::Add for f32/f64
pub trait AddScalar<T, S> {
fn add_scalar(self, rhs: S) -> T;
}
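// Illustrative usage of these traits (values are arbitrary examples):
// let a = Point2::new(1.0f64, 2.0);
// let b = Point2::new(3.0f64, 4.0);
// a.cross(&b); // 1.0 * 4.0 - 2.0 * 3.0 = -2.0
// a.dot(&b); // 1.0 * 3.0 + 2.0 * 4.0 = 11.0
// a.add_scalar(1.0); // Point2 { x: 2.0, y: 3.0 }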
impl<S> Cross<Point2<S>, S> for Point2<S> where S: cgmath::BaseFloat {
fn cross(&self, other: &Point2<S>) -> S {
(self.x * other.y) - (self.y * other.x)
}
}
impl<S> Cross<Vector2<S>, S> for Vector2<S> where S: cgmath::BaseFloat {
fn cross(&self, other: &Vector2<S>) -> S {<|fim▁hole|> (self.x * other.y) - (self.y * other.x)
}
}
impl<S> Dot<Point2<S>, S> for Point2<S> where S: cgmath::BaseFloat {
fn dot(&self, other: &Point2<S>) -> S {
(self.x * other.x) + (self.y * other.y)
}
}
impl<S> Dot<Vector2<S>, S> for Vector2<S> where S: cgmath::BaseFloat {
fn dot(&self, other: &Vector2<S>) -> S {
(self.x * other.x) + (self.y * other.y)
}
}
impl<S> AddScalar<Point2<S>, S> for Point2<S> where S: cgmath::BaseFloat {
fn add_scalar(self, rhs: S) -> Point2<S> {
Self {
x: self.x + rhs,
y: self.y + rhs
}
}
}<|fim▁end|> | |
<|file_name|>E0275.rs<|end_file_name|><|fim▁begin|>// Copyright 2016 The Rust Project Developers. See the COPYRIGHT<|fim▁hole|>// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Foo {}
struct Bar<T>(T);
impl<T> Foo for T where Bar<T>: Foo {} //~ ERROR E0275
fn main() {
}<|fim▁end|> | // file at the top-level directory of this distribution and at |
<|file_name|>node_chain.py<|end_file_name|><|fim▁begin|># coding=utf-8
""" NodeChains are sequential orders of :mod:`~pySPACE.missions.nodes`
.. image:: ../../graphics/node_chain.png
:width: 500
There are two main use cases:
* the application for :mod:`~pySPACE.run.launch_live` and the
:mod:`~pySPACE.environments.live` using the default
:class:`NodeChain` and
* the benchmarking with :mod:`~pySPACE.run.launch` using
the :class:`BenchmarkNodeChain` with the
:mod:`~pySPACE.missions.operations.node_chain` operation.
.. seealso::
- :mod:`~pySPACE.missions.nodes`
- :ref:`node_list`
- :mod:`~pySPACE.missions.operations.node_chain` operation
.. image:: ../../graphics/launch_live.png
:width: 500
.. todo:: Documentation
This module extends/reimplements the original MDP flow class and
has some additional methods like reset(), save() etc.
Furthermore it supports the construction of NodeChains and
also running them inside nodes in parallel.
MDP is distributed under the following BSD license::
This file is part of Modular toolkit for Data Processing (MDP).
All the code in this package is distributed under the following conditions:
Copyright (c) 2003-2012, MDP Developers <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Modular toolkit for Data Processing (MDP)
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import os
if __name__ == '__main__':
# add root of the code to system path
file_path = os.path.dirname(os.path.abspath(__file__))
pyspace_path = file_path[:file_path.rfind('pySPACE')-1]
if not pyspace_path in sys.path:
sys.path.append(pyspace_path)
import cPickle
import gc
import logging
import multiprocessing
import shutil
import socket
import time
import uuid
import yaml
import pySPACE
from pySPACE.tools.filesystem import create_directory
from pySPACE.tools.socket_utils import talk, inform
from pySPACE.tools.conversion import python2yaml, replace_parameters_and_convert, replace_parameters
import copy
import warnings
import traceback
import numpy
class CrashRecoveryException(Exception):
"""Class to handle crash recovery """
def __init__(self, *args):
"""Allow crash recovery.
Arguments: (error_string, crashing_obj, parent_exception)
The crashing object is kept in self.crashing_obj
The triggering parent exception is kept in ``self.parent_exception``.
"""
errstr = args[0]
self.crashing_obj = args[1]
self.parent_exception = args[2]
# ?? python 2.5: super(CrashRecoveryException, self).__init__(errstr)
super(CrashRecoveryException,self).__init__(self, errstr)
def dump(self, filename = None):
"""
Save a pickle dump of the crashing object on filename.
If filename is None, the crash dump is saved on a file created by
the tempfile module.
Return the filename.
"""
import cPickle
import tempfile
if filename is None:
(fd, filename)=tempfile.mkstemp(suffix=".pic", prefix="NodeChainCrash_")
fl = os.fdopen(fd, 'w+b', -1)
else:
fl = open(filename, 'w+b', -1)
cPickle.dump(self.crashing_obj, fl)
fl.close()
return filename
class NodeChainException(Exception):
"""Base class for exceptions in node chains."""
pass
class NodeChainExceptionCR(CrashRecoveryException, NodeChainException):
"""Class to handle crash recovery """
def __init__(self, *args):
"""Allow crash recovery.
Arguments: (error_string, flow_instance, parent_exception)
The triggering parent exception is kept in self.parent_exception.
If ``flow_instance._crash_recovery`` is set, save a crash dump of
flow_instance on the file self.filename
"""
CrashRecoveryException.__init__(self, *args)
rec = self.crashing_obj._crash_recovery
errstr = args[0]
if rec:
if isinstance(rec, str):
name = rec
else:
name = None
name = CrashRecoveryException.dump(self, name)
dumpinfo = '\nA crash dump is available on: "%s"' % name
self.filename = name
errstr = errstr+dumpinfo
Exception.__init__(self, errstr)
class NodeChain(object):
""" Reimplement/overwrite mdp.Flow methods e.g., for supervised learning """
def __init__(self, node_sequence, crash_recovery=False, verbose=False):
""" Creates the NodeChain based on the node_sequence
.. note:: The NodeChain cannot be executed before all trainable
nodes have been trained, i.e. self.trained() == True.
"""
self._check_nodes_consistency(node_sequence)
self.flow = node_sequence
self.verbose = verbose
self.set_crash_recovery(crash_recovery)
# Register the direct predecessor of a node as its input
# (i.e. we assume linear flows)
for i in range(len(node_sequence) - 1):
node_sequence[i+1].register_input_node(node_sequence[i])
self.use_test_data = False
# set a default run number
self[-1].set_run_number(0)
# give this flow a unique identifier
self.id = str(uuid.uuid4())
self.handler = None
self.store_intermediate_results = True
def train(self, data_iterators=None):
""" Train NodeChain with data from iterator or source node
The method can proceed in two different ways:
* If no data is provided, it is checked that the first node of
the flow is a source node. If that is the case, the data provided
by this node is passed forward through the flow. During this
forward propagation, the flow is trained.
The request of the data is done in the last node.
* If a list of data iterators is provided,
it is checked that the NodeChain contains
no source and no split nodes.
These nodes already bring their own data handling
and should not be used when the training data is supplied externally.
Furthermore, split nodes are only relevant for benchmarking.
One iterator has to be given for each node.
If only one iterator (or no list) is given, it is mapped to a list
with the same iterator for each node.
.. note:: The iterator approach is normally not used in pySPACE,
because pySPACE supplies the data via special
source nodes and does the training automatically
without explicit calls on data samples.
The approach came with MDP.
.. todo:: The iterator approach needs some use cases and testing,
especially because it is not used in the normal setting.
"""
if data_iterators is not None:
# Check if no source and split nodes are contained in the node chain
assert(not self[0].is_source_node()), \
"Node chains with source nodes cannot be trained "\
"with external data_iterators!"
for node in self:
assert(not node.is_split_node()), \
"Node chains with split nodes cannot be trained "\
"with external data_iterators!"
# prepare iterables
if not type(data_iterators) == list:
data_iterators = [data_iterators] * len(self.flow)
elif not len(data_iterators)==len(self.flow):
data_iterators = [data_iterators] * len(self.flow)
# Delegate to iterative training
self.iter_train(data_iterators)
else: # Use the pySPACE train semantic and not MDP type
# Check if the first node of the node chain is a source node
assert(self[0].is_source_node()), \
"Training of a node chain without source node requires a "\
"data_iterator argument!"
# Training is accomplished by requesting the iterator
# of the last node of the chain. This node will recursively call
# the train method of all its predecessor nodes.
# As soon as the first element is yielded the node has been trained.
for _ in self[-1].request_data_for_training(
use_test_data=self.use_test_data):
return
def iter_train(self, data_iterables):
""" Train all trainable nodes in the NodeChain with data from iterator
*data_iterables* is a list of iterables, one for each node in the chain.
The iterators returned by the iterables must return data arrays that
are then used for the node training (so the data arrays are the data for
the nodes).
Note that the data arrays are processed by the nodes
which are in front of the node that gets trained, so the data dimension
must match the input dimension of the first node.
If a node has only a single training phase then instead of an iterable
you can alternatively provide an iterator (including generator-type
iterators). For nodes with multiple training phases this is not
possible, since the iterator cannot be restarted after the first
iteration. For more information on iterators and iterables see
http://docs.python.org/library/stdtypes.html#iterator-types .
In the special case that *data_iterables* is one single array,
it is used as the data array *x* for all nodes and training phases.
Instead of a data array *x* the iterators can also return a list or
tuple, where the first entry is *x* and the following are args for the
training of the node (e.g., for supervised training).
"""
data_iterables = self._train_check_iterables(data_iterables)
# train each Node successively
for i in range(len(self.flow)):
if self.verbose:
print "Training node #%d (%s)" % (i, str(self.flow[i]))
self._train_node(data_iterables[i], i)
if self.verbose:
print "Training finished"
self._close_last_node()
def trained(self):
"""
Returns whether the complete training is finished, i.e. if all nodes have been trained.
"""
return self[-1].get_remaining_train_phase() == 0
def execute(self, data_iterators=None):
""" Process the data through all nodes """
if data_iterators is not None:
# Delegate to super class
return self.iter_execute(data_iterators)
else: # Use the evaluate semantic
# Check if the first node of the flow is a source node
assert (self[0].is_source_node()), \
"Evaluation of a node chain without source node requires a " \
"data_iterator argument!"
# This is accomplished by calling the request_data_for_testing
# method of the last node of the chain. This node will recursively
# call the request_data_for_testing method of all its predecessor
# nodes
return self[-1].process()
def iter_execute(self, iterable, nodenr = None):
""" Process the data through all nodes in the chain till *nodenr*
'iterable' is an iterable or iterator (note that a list is also an
iterable), which returns data arrays that are used as input.
Alternatively, one can specify one data array as input.
If 'nodenr' is specified, the flow is executed only up to
node nr. 'nodenr'. This is equivalent to 'flow[:nodenr+1](iterable)'.
.. note:: In contrast to MDP, results are not concatenated
into one big object. Each data object remains separate.
"""
if isinstance(iterable, numpy.ndarray):
return self._execute_seq(iterable, nodenr)
res = []
empty_iterator = True
for x in iterable:
empty_iterator = False
res.append(self._execute_seq(x, nodenr))
if empty_iterator:
errstr = ("The execute data iterator is empty.")
raise NodeChainException(errstr)
return res
def _inc_train(self, data, class_label=None):
""" Iterate through the nodes to train them """
for node in self:
if node.is_retrainable() and not node.buffering and hasattr(node, "_inc_train"):
if not node.retraining_phase:
node.retraining_phase=True
node.start_retraining()
node._inc_train(data,class_label)
if not (hasattr(self, "buffering") and self.buffering):
data = node.execute(data)
else: # workaround to inherit meta data
self.buffering = False
data = node.execute(data)
self.buffering = True
def save(self, filename, protocol = -1):
""" Save a pickled representation to *filename*
If *filename* is None, return a string.
.. note:: the pickled NodeChain is not guaranteed to be upward or
backward compatible.
.. note:: Having C-Code in the node might cause problems with saving.
Therefore, the code has special handling for the
LibSVMClassifierNode.
.. todo:: Intrinsic node methods for storing should be used.
.. seealso:: :func:`store_node_chain`
"""
if self[-1].__class__.__name__ in ["LibSVMClassifierNode"] \
and self[-1].multinomial:
indx = filename.find(".pickle")
if indx != -1:
self[-1].save_model(filename[0:indx]+'.model')
else:
self[-1].save_model(filename+'.model')
import cPickle
odict = self.__dict__.copy() # copy the dict since we change it
# Remove other non-pickable stuff
remove_keys=[]
k = 0
for key, value in odict.iteritems():
if key == "input_node" or key == "flow":
continue
try:
cPickle.dumps(value)
except (ValueError, TypeError, cPickle.PicklingError):
remove_keys.append(key)
for key in remove_keys:
odict.pop(key)
self.__dict__ = odict
if filename is None:
return cPickle.dumps(self, protocol)
else:
# if protocol != 0 open the file in binary mode
if protocol != 0:
mode = 'wb'
else:
mode = 'w'
flh = open(filename , mode)
cPickle.dump(self, flh, protocol)
flh.close()
def get_output_type(self, input_type, as_string=True):
"""
Returns the output type of the entire node chain
Recursively iterate over nodes in flow
"""
output = input_type
for i in range(len(self.flow)):
if i == 0:
output = self.flow[i].get_output_type(
input_type, as_string=True)
else:
output = self.flow[i].get_output_type(output, as_string=True)
if as_string:
return output
else:
return self.string_to_class(output)
@staticmethod
def string_to_class(string_encoding):
""" given a string variable, outputs a class instance
e.g. obtaining a TimeSeries
"""
from pySPACE.resources.data_types.time_series import TimeSeries
from pySPACE.resources.data_types.feature_vector import FeatureVector
from pySPACE.resources.data_types.prediction_vector import PredictionVector
if "TimeSeries" in string_encoding:
return TimeSeries
elif "PredictionVector" in string_encoding:
return PredictionVector
elif "FeatureVector" in string_encoding:
return FeatureVector
else:
raise NotImplementedError
#################
# MDP Code copy #
def _propagate_exception(self, exception, nodenr):
# capture exception. the traceback of the error is printed and a
# new exception, containing the identity of the node in the NodeChain
# is raised. Allow crash recovery.
(etype, val, tb) = sys.exc_info()
prev = ''.join(traceback.format_exception(exception.__class__,
exception,tb))
act = "\n! Exception in node #%d (%s):\n" % (nodenr,
str(self.flow[nodenr]))
errstr = ''.join(('\n', 40*'-', act, 'Node Traceback:\n', prev, 40*'-'))
raise NodeChainExceptionCR(errstr, self, exception)
def _train_node(self, data_iterable, nodenr):
""" Train a single node in the flow.
nodenr -- index of the node in the flow
"""
node = self.flow[nodenr]
if (data_iterable is not None) and (not node.is_trainable()):
# attempted to train a node although it is not trainable.
# raise a warning and continue with the next node.
# wrnstr = "\n! Node %d is not trainable" % nodenr + \
# "\nYou probably need a 'None' iterable for"+\
# " this node. Continuing anyway."
#warnings.warn(wrnstr, UserWarning)
return
elif (data_iterable is None) and node.is_training():
# None instead of iterable is passed to a training node
err_str = ("\n! Node %d is training"
" but instead of iterable received 'None'." % nodenr)
raise NodeChainException(err_str)
elif (data_iterable is None) and (not node.is_trainable()):
# skip training if node is not trainable
return
try:
train_arg_keys = self._get_required_train_args(node)
train_args_needed = bool(len(train_arg_keys))
## We leave the last training phase open for the
## CheckpointFlow class.
## Checkpoint functions must close it explicitly if needed!
## Note that the last training_phase is closed
## automatically when the node is executed.
while True:
empty_iterator = True
for x in data_iterable:
empty_iterator = False
# the arguments following the first are passed only to the
# currently trained node, allowing the implementation of
# supervised nodes
if (type(x) is tuple) or (type(x) is list):
arg = x[1:]
x = x[0]
else:
arg = ()
# check if the required number of arguments was given
if train_args_needed:
if len(train_arg_keys) != len(arg):
err = ("Wrong number of arguments provided by " +
"the iterable for node #%d " % nodenr +
"(%d needed, %d given).\n" %
(len(train_arg_keys), len(arg)) +
"List of required argument keys: " +
str(train_arg_keys))
raise NodeChainException(err)
# filter x through the previous nodes
if nodenr > 0:
x = self._execute_seq(x, nodenr-1)
# train current node
node.train(x, *arg)
if empty_iterator:
if node.get_current_train_phase() == 1:
err_str = ("The training data iteration for node "
"no. %d could not be repeated for the "
"second training phase, you probably "
"provided an iterator instead of an "
"iterable." % (nodenr+1))
raise NodeChainException(err_str)
else:
err_str = ("The training data iterator for node "
"no. %d is empty." % (nodenr+1))
raise NodeChainException(err_str)
self._stop_training_hook()
# close the previous training phase
node.stop_training()
if node.get_remaining_train_phase() > 0:
continue
else:
break
except self.flow[-1].TrainingFinishedException, e:
# attempted to train a node although its training phase is already
# finished. raise a warning and continue with the next node.
wrnstr = ("\n! Node %d training phase already finished"
" Continuing anyway." % nodenr)
warnings.warn(wrnstr, UserWarning)
except NodeChainExceptionCR, e:
# this exception was already propagated,
# probably during the execution of a node upstream in the flow
(exc_type, val) = sys.exc_info()[:2]
prev = ''.join(traceback.format_exception_only(e.__class__, e))
prev = prev[prev.find('\n')+1:]
act = "\nWhile training node #%d (%s):\n" % (nodenr,
str(self.flow[nodenr]))
err_str = ''.join(('\n', 40*'=', act, prev, 40*'='))
raise NodeChainException(err_str)
except Exception, e:
# capture any other exception occurred during training.
self._propagate_exception(e, nodenr)
def _stop_training_hook(self):
"""Hook method that is called before stop_training is called."""
pass
@staticmethod
def _get_required_train_args(node):
"""Return arguments in addition to self and x for node.train.
Arguments that have a default value are ignored.
"""
import inspect
train_arg_spec = inspect.getargspec(node.train)
train_arg_keys = train_arg_spec[0][2:] # ignore self, x
if train_arg_spec[3]:
# subtract arguments with a default value
train_arg_keys = train_arg_keys[:-len(train_arg_spec[3])]
return train_arg_keys<|fim▁hole|> """Return the data iterables after some checks and sanitizing.
Note that this method does not distinguish between iterables and
iterators, so this must be taken care of later.
"""
# verifies that the number of iterables matches that of
# the signal nodes and multiplies them if needed.
flow = self.flow
# # if a single array is given wrap it in a list of lists,
# # note that a list of 2d arrays is not valid
# if isinstance(data_iterables, numpy.ndarray):
# data_iterables = [[data_iterables]] * len(flow)
if not isinstance(data_iterables, list):
err_str = ("'data_iterables' must be either a list of "
"iterables or an array, but got %s" %
str(type(data_iterables)))
raise NodeChainException(err_str)
# check that all elements are iterable
for i, iterable in enumerate(data_iterables):
if (iterable is not None) and (not hasattr(iterable, '__iter__')):
err = ("Element number %d in the data_iterables"
" list is not an iterable." % i)
raise NodeChainException(err)
# check that the number of data_iterables is correct
if len(data_iterables) != len(flow):
err_str = ("%d data iterables specified,"
" %d needed" % (len(data_iterables), len(flow)))
raise NodeChainException(err_str)
return data_iterables
def _close_last_node(self):
if self.verbose:
print "Close the training phase of the last node"
try:
self.flow[-1].stop_training()
except self.flow[-1].TrainingFinishedException:
pass
except Exception, e:
self._propagate_exception(e, len(self.flow)-1)
def set_crash_recovery(self, state = True):
"""Set crash recovery capabilities.
When a node raises an Exception during training, execution, or
inverse execution that the flow is unable to handle, a NodeChainExceptionCR
is raised. If crash recovery is set, a crash dump of the flow
instance is saved for later inspection. The original exception
can be found as the 'parent_exception' attribute of the
NodeChainExceptionCR instance.
- If 'state' = False, disable crash recovery.
- If 'state' is a string, the crash dump is saved on a file
with that name.
- If 'state' = True, the crash dump is saved on a file created by
the tempfile module.
"""
self._crash_recovery = state
def _execute_seq(self, x, nodenr = None):
""" Executes input data 'x' through the nodes 0..'node_nr' included
If no *nodenr* is specified, the complete node chain is used for
processing.
"""
flow = self.flow
if nodenr is None:
nodenr = len(flow)-1
for node_index in range(nodenr+1):
try:
x = flow[node_index].execute(x)
except Exception, e:
self._propagate_exception(e, node_index)
return x
def copy(self, protocol=None):
"""Return a deep copy of the flow.
The protocol parameter should not be used.
"""
import copy
if protocol is not None:
warnings.warn("protocol parameter to copy() is ignored",
DeprecationWarning, stacklevel=2)
return copy.deepcopy(self)
def __call__(self, iterable, nodenr = None):
"""Calling an instance is equivalent to call its 'execute' method."""
return self.iter_execute(iterable, nodenr=nodenr)
###### string representation
def __str__(self):
nodes = ', '.join([str(x) for x in self.flow])
return '['+nodes+']'
def __repr__(self):
# this should look like a valid Python expression that
# could be used to recreate an object with the same value
# eval(repr(object)) == object
name = type(self).__name__
pad = len(name)+2
sep = ',\n'+' '*pad
nodes = sep.join([repr(x) for x in self.flow])
return '%s([%s])' % (name, nodes)
###### private container methods
def __len__(self):
return len(self.flow)
def _check_dimension_consistency(self, out, inp):
"""Raise ValueError when both dimensions are set and different."""
if ((out and inp) is not None) and out != inp:
errstr = "dimensions mismatch: %s != %s" % (str(out), str(inp))
raise ValueError(errstr)
def _check_nodes_consistency(self, flow = None):
"""Check the dimension consistency of a list of nodes."""
if flow is None:
flow = self.flow
len_flow = len(flow)
for i in range(1, len_flow):
out = flow[i-1].output_dim
inp = flow[i].input_dim
self._check_dimension_consistency(out, inp)
def _check_value_type_isnode(self, value):
if not isinstance(value, pySPACE.missions.nodes.base.BaseNode):
raise TypeError("flow item must be Node instance")
def __getitem__(self, key):
if isinstance(key, slice):
flow_slice = self.flow[key]
self._check_nodes_consistency(flow_slice)
return self.__class__(flow_slice)
else:
return self.flow[key]
def __setitem__(self, key, value):
if isinstance(key, slice):
[self._check_value_type_isnode(item) for item in value]
else:
self._check_value_type_isnode(value)
# make a copy of list
flow_copy = list(self.flow)
flow_copy[key] = value
# check dimension consistency
self._check_nodes_consistency(flow_copy)
# if no exception was raised, accept the new sequence
self.flow = flow_copy
def __delitem__(self, key):
# make a copy of list
flow_copy = list(self.flow)
del flow_copy[key]
# check dimension consistency
self._check_nodes_consistency(flow_copy)
# if no exception was raised, accept the new sequence
self.flow = flow_copy
def __contains__(self, item):
return self.flow.__contains__(item)
def __iter__(self):
return self.flow.__iter__()
def __add__(self, other):
# append other to self
if isinstance(other, NodeChain):
flow_copy = list(self.flow).__add__(other.flow)
# check dimension consistency
self._check_nodes_consistency(flow_copy)
# if no exception was raised, accept the new sequence
return self.__class__(flow_copy)
elif isinstance(other, pySPACE.missions.nodes.base.BaseNode):
flow_copy = list(self.flow)
flow_copy.append(other)
# check dimension consistency
self._check_nodes_consistency(flow_copy)
# if no exception was raised, accept the new sequence
return self.__class__(flow_copy)
else:
err_str = ('can only concatenate flow or node'
' (not \'%s\') to flow' % (type(other).__name__))
raise TypeError(err_str)
def __iadd__(self, other):
# append other to self
if isinstance(other, NodeChain):
self.flow += other.flow
elif isinstance(other, pySPACE.missions.nodes.base.BaseNode):
self.flow.append(other)
else:
err_str = ('can only concatenate flow or node'
' (not \'%s\') to flow' % (type(other).__name__))
raise TypeError(err_str)
self._check_nodes_consistency(self.flow)
return self
###### public container methods
def append(self, x):
"""flow.append(node) -- append node to flow end"""
self[len(self):len(self)] = [x]
def extend(self, x):
"""flow.extend(iterable) -- extend flow by appending
elements from the iterable"""
if not isinstance(x, NodeChain):
err_str = ('can only concatenate flow'
' (not \'%s\') to flow' % (type(x).__name__))
raise TypeError(err_str)
self[len(self):len(self)] = x
def insert(self, i, x):
"""flow.insert(index, node) -- insert node before index"""
self[i:i] = [x]
def pop(self, i = -1):
"""flow.pop([index]) -> node -- remove and return node at index
(default last)"""
x = self[i]
del self[i]
return x
def reset(self):
""" Reset the flow and obey permanent_attributes where available
Method was moved to the end of class code, due to program environment
problems which needed the __getitem__ method beforehand.
"""
for i in range(len(self)):
self[i].reset()
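# Illustrative sketch of assembling and using a NodeChain; the node variables
# are placeholders and the train/execute call pattern follows the MDP-style
# flow API assumed by this class:
#
#     node_chain = NodeChain([windowing_node, filter_node, classifier_node])
#     node_chain.train(data_iterables)        # train the nodes one after another
#     result = node_chain.execute(test_data)  # push data through the whole chain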
class BenchmarkNodeChain(NodeChain):
""" This subclass overwrites the train method in order
to provide a more convenient way of doing supervised learning.
Furthermore, it contains a benchmark method that can be used for
benchmarking.
This includes logging, setting of run numbers,
delivering the result collection, handling of source and sink nodes, ...
:Author: Jan Hendrik Metzen ([email protected])
:Created: 2008/08/18
"""
def __init__(self, node_sequence):
""" Creates the BenchmarkNodeChain based on the node_sequence """
super(BenchmarkNodeChain, self).__init__(node_sequence)
# Each BenchmarkNodeChain must start with an source node
# and end with a sink node
assert(self[0].is_source_node()), \
"A benchmark flow must start with a source node"
assert(self[-1].is_sink_node()), \
"A benchmark flow must end with a sink node"
def use_next_split(self):
"""
Use the next split of the data into training and test data
This method is useful for pySPACE-benchmarking
"""
# This is handled by calling use_next_split() of the last node of
# the flow which will recursively call predecessor nodes in the flow
# until a node is found that handles the splitting
return self[-1].use_next_split()
def benchmark(self, input_collection, run=0,
persistency_directory=None, store_node_chain=False):
""" Perform the benchmarking of this data flow with the given collection
Benchmarking is accomplished by iterating through all splits of the
data into training and test data.
**Parameters**:
:input_collection:
A sequence of data/label-tuples that serves as a generator or a
BaseDataset which contains the data to be processed.
:run:
The current run which defines all random seeds within the flow.
:persistency_directory:
Optional information of the nodes as well as the trained node chain
(if *store_node_chain* is not False) are stored to the given
*persistency_directory*.
:store_node_chain:
If True the trained flow is stored to *persistency_directory*.
                If *store_node_chain* is a tuple of length 2, say (i1, i2), then
only the subflow starting at the i1-th node and ending at the
(i2-1)-th node is stored. This may be useful when the stored
flow should be used in an ensemble.
"""
# Inform the first node of this flow about the input collection
if hasattr(input_collection,'__iter__'):
# assume a generator is given
self[0].set_generator(input_collection)
else: # assume BaseDataset
self[0].set_input_dataset(input_collection)
# Inform all nodes recursively about the number of the current run
self[-1].set_run_number(int(run))
# set temp file folder
        if persistency_directory is not None:
self[-1].set_temp_dir(persistency_directory+os.sep+"temp_dir")
split_counter = 0
# For every split of the dataset
while True: # As long as more splits are available
# Compute the results for the current split
# by calling the method on its last node
self[-1].process_current_split()
            if persistency_directory is not None:
if store_node_chain:
self.store_node_chain(persistency_directory + os.sep + \
"node_chain_sp%s.pickle" % split_counter, store_node_chain)
# Store nodes that should be persistent
self.store_persistent_nodes(persistency_directory)
# If no more splits are available
if not self.use_next_split():
break
split_counter += 1
# print "Input benchmark"
# print gc.get_referrers(self[0].collection)
        # During the flow numerous references to the flow are created but not
        # deleted, so the memory is not freed (see the commented debug output
        # above). Therefore we explicitly release the input collection so that
        # the garbage collector can free the memory. Otherwise, for reasons not
        # yet understood, the references to the input collection remain even
        # between processes.
if hasattr(input_collection,'__iter__'):
self[0].set_generator(None)
else:
self[0].set_input_dataset(None)
gc.collect()
# Return the result collection of this flow
return self[-1].get_result_dataset()
def __call__(self, iterable=None, train_instances=None, runs=[]):
""" Call *execute* or *benchmark* and return (id, PerformanceResultSummary)
If *iterable* is given, calling an instance is equivalent to call its
'execute' method.
If *train_instances* and *runs* are given, 'benchmark' is called for
every run number specified and results are merged. This is useful for
e.g. parallel execution of subflows with the multiprocessing module,
since instance methods can not be serialized in Python but whole objects.
"""
        if iterable is not None:
            return self.execute(iterable)
        elif train_instances is not None and runs != []: # parallelization case
# we have to reinitialize logging cause otherwise deadlocks occur
# when parallelization is done via multiprocessing.Pool
self.prepare_logging()
for ind, run in enumerate(runs):
result = self.benchmark(train_instances, run=run)
if ind == 0:
result_collection = result
else:
result_collection.data.update(result.data)
# reset node chain for new training if another call of
# :func:`benchmark` is expected.
if not ind == len(runs) - 1:
self.reset()
self.clean_logging()
return (self.id, result_collection)
else:
import warnings
warnings.warn("__call__ methods needs at least one parameter (data)")
return None
def store_node_chain(self, result_dir, store_node_chain):
""" Pickle this flow into *result_dir* for later usage"""
if isinstance(store_node_chain,basestring):
store_node_chain = eval(store_node_chain)
if isinstance(store_node_chain,tuple):
assert(len(store_node_chain) == 2)
# Keep only subflow starting at the i1-th node and ending at the
# (i2-1) node.
flow = NodeChain(self.flow[store_node_chain[0]:store_node_chain[1]])
elif isinstance(store_node_chain,list):
# Keep only nodes with indices contained in the list
# nodes have to be copied, otherwise input_node-refs of current flow
# are overwritten
from copy import copy
store_node_list = [copy(node) for ind, node in enumerate(self.flow) \
if ind in store_node_chain]
flow = NodeChain(store_node_list)
else:
# Per default, get rid of source and sink nodes
flow = NodeChain(self.flow[1:-1])
input_node = flow[0].input_node
flow[0].input_node = None
flow.save(result_dir)
def prepare_logging(self):
""" Set up logging
This method is only needed if one forks subflows, i.e. to execute them
via multiprocessing.Pool
"""
# Prepare remote logging
root_logger = logging.getLogger("%s-%s" % (socket.gethostname(),
os.getpid()))
root_logger.setLevel(logging.DEBUG)
root_logger.propagate = False
if len(root_logger.handlers)==0:
self.handler = logging.handlers.SocketHandler(socket.gethostname(),
logging.handlers.DEFAULT_TCP_LOGGING_PORT)
root_logger.addHandler(self.handler)
def clean_logging(self):
""" Remove logging handlers if existing
Call this method only if you have called *prepare_logging* before.
"""
# Remove potential logging handlers
if self.handler is not None:
self.handler.close()
root_logger = logging.getLogger("%s-%s" % (socket.gethostname(),
os.getpid()))
root_logger.removeHandler(self.handler)
def store_persistent_nodes(self, result_dir):
""" Store all nodes that should be persistent """
        # For all nodes
for index, node in enumerate(self):
# Store them in the result dir if they enabled storing
node.store_state(result_dir, index)
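# Rough benchmarking sketch for a BenchmarkNodeChain (the node instances,
# dataset and output path are assumptions made for illustration only):
#
#     flow = BenchmarkNodeChain([source, preprocessing, classifier, sink])
#     result_dataset = flow.benchmark(input_collection=dataset, run=0,
#                                     persistency_directory="/tmp/results",
#                                     store_node_chain=True)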
class NodeChainFactory(object):
""" Provide static methods to create and instantiate data flows
:Author: Jan Hendrik Metzen ([email protected])
:Created: 2009/01/26
"""
@staticmethod
def flow_from_yaml(Flow_Class, flow_spec):
""" Creates a Flow object
Reads from the given *flow_spec*, which should be a valid YAML
specification of a NodeChain object, and returns this dataflow
object.
**Parameters**
:Flow_Class:
The class name of node chain to create. Valid are 'NodeChain' and
'BenchmarkNodeChain'.
:flow_spec:
A valid YAML specification stream; this could be a file object,
a string representation of the YAML file or the Python
representation of the YAML file (list of dicts)
"""
from pySPACE.missions.nodes.base_node import BaseNode
# Reads and parses the YAML file if necessary
if type(flow_spec) != list:
dataflow_spec = yaml.load(flow_spec)
else:
dataflow_spec = flow_spec
node_sequence = []
# For all nodes of the flow
for node_spec in dataflow_spec:
# Use factory method to create node
node_obj = BaseNode.node_from_yaml(node_spec)
# Append this node to the sequence of node
node_sequence.append(node_obj)
# Check if the nodes have to cache their outputs
for index, node in enumerate(node_sequence):
# If a node is trainable, it uses the outputs of its input node
# at least twice, so we have to cache.
if node.is_trainable():
node_sequence[index - 1].set_permanent_attributes(caching = True)
# Split node might also request the data from their input nodes
# (once for each split), depending on their implementation. We
# assume the worst case and activate caching
if node.is_split_node():
node_sequence[index - 1].set_permanent_attributes(caching = True)
# Create the flow based on the node sequence and the given flow class
# and return it
return Flow_Class(node_sequence)
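    # Sketch of a YAML node chain specification accepted by flow_from_yaml
    # (node names and parameters are illustrative, not an exhaustive list):
    #
    #     - node : TimeSeriesSource
    #     - node : Decimation
    #       parameters :
    #           target_frequency : 25.0
    #     - node : PerformanceSink
    #
    #     flow = NodeChainFactory.flow_from_yaml(BenchmarkNodeChain, yaml_string)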
@staticmethod
def instantiate(template, parametrization):
""" Instantiate a template recursively for the given parameterization
Instantiate means to replace the parameter in the template by the
chosen value.
**Parameters**
:template:
A dictionary with key-value pairs, where values might contain
parameter keys which have to be replaced. A typical example of a
template would be a Python representation of a node read from YAML.
:parametrization:
A dictionary with parameter names as keys and exact one value for
this parameter as value.
"""
instance = {}
for key, value in template.iteritems():
if value in parametrization.keys(): # Replacement
instance[key] = parametrization[value]
elif isinstance(value, dict): # Recursive call
instance[key] = NodeChainFactory.instantiate(value, parametrization)
elif isinstance(value, basestring): # String replacement
for param_key, param_value in parametrization.iteritems():
try:
value = value.replace(param_key, repr(param_value))
except:
value = value.replace(param_key, python2yaml(param_value))
instance[key] = value
elif hasattr(value, "__iter__"):
# Iterate over all items in sequence
instance[key] = []
for iter_item in value:
if iter_item in parametrization.keys(): # Replacement
instance[key].append(parametrization[iter_item])
elif isinstance(iter_item, dict):
instance[key].append(NodeChainFactory.instantiate(
iter_item, parametrization))
                    elif isinstance(iter_item, basestring): # String replacement
                        for param_key, param_value in parametrization.iteritems():
                            try:
                                iter_item = iter_item.replace(param_key,
                                                              repr(param_value))
                            except:
                                iter_item = iter_item.replace(
                                    param_key, python2yaml(param_value))
                        instance[key].append(iter_item)
else:
instance[key].append(iter_item)
else: # Not parameterized
instance[key] = value
return instance
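    # Hedged example of what instantiate does; the node name and parameter key
    # are made up:
    #
    #     template = {"node": "ExampleNode",
    #                 "parameters": {"width": "__width__"}}
    #     NodeChainFactory.instantiate(template, {"__width__": 0.5})
    #     # -> {"node": "ExampleNode", "parameters": {"width": 0.5}}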
@staticmethod
def replace_parameters_in_node_chain(node_chain_template, parametrization):
node_chain_template = copy.copy(node_chain_template)
if parametrization == {}:
return node_chain_template
elif type(node_chain_template) == list:
return [NodeChainFactory.instantiate(
template=node,parametrization=parametrization)
for node in node_chain_template]
elif isinstance(node_chain_template, basestring):
node_chain_template = \
replace_parameters(node_chain_template, parametrization)
return node_chain_template
class SubflowHandler(object):
""" Interface for nodes to generate and execute subflows (subnode-chains)
A subflow means a node chain used inside a node for processing data.
This class provides functions that can be used by nodes to generate and
execute subflows. It serves thereby as a communication daemon to the
backend (if it is used).
Most important when inheriting from this class is that the subclass MUST be
a node. The reason is that this class uses node functionality, e.g. logging,
the *temp_dir*-variable and so on.
**Parameters**
:processing_modality:
One of the valid strings: 'backend', 'serial', 'local'.
:backend:
                The current backend's modality is used. This is implemented
at the moment only for 'LoadlevelerBackend' and 'LocalBackend'.
:serial:
All subflows are executed sequentially, i.e. one after the
other.
:local:
Subflows are executed in a Pool using *pool_size* cpus. This
may be also needed when no backend is used.
(*optional, default: 'serial'*)
:pool_size:
If a parallelization is based on using several processes on a local
system in parallel, e.g. option 'backend' and
:class:`pySPACEMulticoreBackend`
or option
'local', the number of worker processes for subflow evaluation has
to be specified.
.. note:: When using the LocalBackend, there is also the possibility
to specify the pool size of parallel executed
processes, e.g. data sets. Your total number of cpu's
should be pool size (pySPACE) + pool size (subflows).
(*optional, default: 2*)
:batch_size:
If parallelization of subflow execution is done together with the
:class:`~pySPACE.environments.backends.ll_backend.LoadLevelerBackend`,
*batch_size* determines how many subflows are executed in one
serial LoadLeveler job. This option is useful if execution of a
single subflow is really short (range of seconds) since there is
significant overhead in creating new jobs.
(*optional, default: 1*)
:Author: Anett Seeland ([email protected])
:Created: 2012/09/04
:LastChange: 2012/11/06 batch_size option added
"""
def __init__(self, processing_modality='serial', pool_size=2, batch_size=1,
**kwargs):
self.modality = processing_modality
self.pool_size = int(pool_size)
self.batch_size = int(batch_size)
# a flag to send pool_size / batch_size only once to the backend
self.already_send = False
self.backend_com = None
self.backend_name = None
# to indicate the end of a message received over a socket
self.end_token = '!END!'
if processing_modality not in ["serial", "local", "backend"]:
import warnings
warnings.warn("Processing modality not found! Serial mode is used!")
self.modality = 'serial'
@staticmethod
def generate_subflow(flow_template, parametrization=None, flow_class=None):
""" Return a *flow_class* object of the given *flow_template*
        This method wraps two function calls (NodeChainFactory.instantiate and
        NodeChainFactory.flow_from_yaml).
**Parameters**
:flow_template:
List of dicts - a valid representation of a node chain.
Alternatively, a YAML-String representation could be used,
which simplifies parameter replacement.
:parametrization:
A dictionary with parameter names as keys and exact one value for
this parameter as value. Passed to NodeChainFactory.instantiate
(*optional, default: None*)
:flow_class:
The flow class name of which an object should be returned
(*optional, default: BenchmarkNodeChain*)
"""
if flow_class is None:
flow_class = BenchmarkNodeChain
flow_spec = NodeChainFactory.replace_parameters_in_node_chain(
flow_template,parametrization)
# create a new Benchmark flow
flow = NodeChainFactory.flow_from_yaml(flow_class, flow_spec)
return flow
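    # Sketch of generating subflows from a template (the template and the
    # parameter grid are placeholders):
    #
    #     subflows = [SubflowHandler.generate_subflow(flow_template, params)
    #                 for params in parameter_grid]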
def execute_subflows(self, train_instances, subflows, run_numbers=None):
""" Execute subflows and return result collection.
**Parameters**
            :train_instances:
List of training instances which should be used to execute
*subflows*.
:subflows:
List of BenchmarkNodeChain objects.
                .. note:: Every subflow object is stored in memory!
:run_numbers:
All subflows will be executed with every run_number specified in
this list. If None, the current self.run_number (from the node
class) is used.
(*optional, default: None*)
"""
        if run_numbers is None:
run_numbers = [self.run_number]
# in case of serial backend, modality is mapped to serial
# in the other case communication must be set up and
# jobs need to be submitted to backend
if self.modality == 'backend':
self.backend_com = pySPACE.configuration.backend_com
            if self.backend_com is not None:
# ask for backend_name
# create a socket and keep it alive as long as possible since
                # handshaking is really time-consuming
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(self.backend_com)
client_socket, self.backend_name = talk('name' + self.end_token,
client_socket, self.backend_com)
else:
import warnings #necessary for serial backend!
warnings.warn("Seems that no backend is used! Modality of subflow execution "\
"has to be specified! Assuming serial backend.")
self.backend_name = 'serial'
self._log("Preparing subflows for backend execution.")
if self.backend_name in ['loadl','mcore'] :
# we have to pickle training instances and store it on disk
store_path = os.path.join(self.temp_dir,
"sp%d" % self.current_split)
create_directory(store_path)
filename = os.path.join(store_path, "subflow_data.pickle")
if not os.path.isfile(filename):
cPickle.dump(train_instances, open(filename,'wb'),
protocol=cPickle.HIGHEST_PROTOCOL)
subflows_to_compute = [subflows[ind].id for ind in \
range(len(subflows))]
if self.backend_name == 'loadl':
# send batch_size to backend if not already done
if not self.already_send:
client_socket = inform("subflow_batchsize;%d%s" % \
(self.batch_size, self.end_token),
client_socket, self.backend_com)
self.already_send = True
for subflow in subflows:
cPickle.dump(subflow, open(os.path.join(store_path,
subflow.id+".pickle"),"wb"),
protocol=cPickle.HIGHEST_PROTOCOL)
send_flows = subflows_to_compute
else: # backend_name == mcore
# send pool_size to backend if not already done
if not self.already_send:
client_socket = inform("subflow_poolsize;%d%s" % \
(self.pool_size, self.end_token),
client_socket, self.backend_com)
self.already_send = True
# send flow objects via socket
send_flows = [cPickle.dumps(subflow, cPickle.HIGHEST_PROTOCOL) \
for subflow in subflows]
# inform backend
client_socket,msg = talk('execute_subflows;%s;%d;%s;%s%s' % \
(store_path, len(subflows), str(send_flows),
str(run_numbers), self.end_token),
client_socket, self.backend_com)
time.sleep(10)
not_finished_subflows = set(subflows_to_compute)
while len(not_finished_subflows) != 0:
# ask backend for finished jobs
client_socket, msg = talk('is_ready;%d;%s%s' % \
(len(not_finished_subflows), str(not_finished_subflows),
self.end_token), client_socket, self.backend_com)
# parse message
finished_subflows = eval(msg) #should be a set
# set difference
not_finished_subflows -= finished_subflows
time.sleep(10)
if self.backend_name == 'loadl':
# read results and delete store_dir
result_pattern = os.path.join(store_path, '%s_result.pickle')
result_collections = [cPickle.load(open(result_pattern % \
subflows[ind].id,'rb')) for ind in range(len(subflows))]
# ..todo:: check if errors have occurred and if so do not delete!
shutil.rmtree(store_path)
else: # backend_name == mcore
# ask backend to send results
client_socket, msg = talk("send_results;%s!END!" % \
subflows_to_compute, client_socket, self.backend_com)
# should be a list of collections
results = eval(msg)
result_collections = [cPickle.loads(result) for result in results]
self._log("Finished subflow execution.")
client_socket.shutdown(socket.SHUT_RDWR)
client_socket.close()
return result_collections
elif self.backend_name == 'serial':
# do the same as modality=='serial'
self.modality = 'serial'
else: # e.g. mpi backend :
import warnings
warnings.warn("Subflow Handling with %s backend not supported,"\
" serial-modality is used!" % self.backend_name)
self.modality = 'serial'
if self.modality == 'serial':
# serial execution
# .. note:: the here executed flows can not store anything.
# meta data of result collection is NOT updated!
results = [subflow(train_instances=train_instances,
runs=run_numbers) for subflow in subflows]
result_collections = [result[1] for result in results]
return result_collections
else: # modality local, e.g. usage without backend in application case
self._log("Subflow Handler starts processes in pool.")
pool = multiprocessing.Pool(processes=self.pool_size)
results = [pool.apply_async(func=subflow,
kwds={"train_instances": train_instances,
"runs": run_numbers}) \
for subflow in subflows]
pool.close()
self._log("Waiting for parallel processes to finish.")
pool.join()
result_collections = [result.get()[1] for result in results]
del pool
return result_collections<|fim▁end|> |
def _train_check_iterables(self, data_iterables): |
<|file_name|>index3.js<|end_file_name|><|fim▁begin|>import {TextFileLoader} from './painter/TextFileLoader'
import {InstancedGrid} from './painter/InstancedGrid'
import {QCurve} from './painter/QCurve'
import {QCurveObj} from './painter/QCurve'
import {StrokePath} from './painter/StrokePath'
import {Particle} from './painter/Particle'
import {VectorField} from './painter/VectorField'
import Preloader from "./preloader/Preloader"
import {MathUtils} from "./util/MathUtils"
import * as THREE from 'three'
import * as Stats from 'stats-js'
/**
* Created by David on 14/12/2016.
*/
//if ( !Detector.webgl ) Detector.addGetWebGLMessage();
///////////////////////
var container, stats;
var controls;
var camera, scene, renderer;
//var orientations;
//var offsets;
//var lengths;
//var speeds;
var fragshader;
var vertshader;
TextFileLoader.Instance().loadFiles(["shaders/fragShader.glsl","shaders/vertShader.glsl"], filesLoaded);
var textureFilenames = [];
for(var i=1;i<=41;++i)
{
textureFilenames.push( "colourful" + "/" + ((i < 10) ?"0":"") + i + ".jpg");
}
var textureIX = 0;
function filesLoaded(files)
{
fragshader = files[0];
vertshader = files[1];
makeMeshObj();
}
// emit particles from a bound, bottom of the bound is the horizon line?
// if camera is centered, then it should be centered in x pos
// if camera is not centered,
var ExportMode = {
"png": "png",
"jpg": "jpg"
};
var exportMode = ExportMode.png;
//var renderScale = 5.4;
var renderScale,bw,bh,bottomy;
var Mode = {
"skyline": "skyline",
"maps": "maps"
};
var mode = Mode.maps;
//renderScale = 7.2;
renderScale = 1.0;
if(mode == Mode.skyline)
{
bw = 1000;
bh = bw*(3/4);
bottomy = bh *0.6;
}
else{
// "maps"
// renderScale = 7.2;
bh = 1000;
bw = bh*(3/4);
bottomy = bh * 1.0;
}
var w = bw * renderScale;
var h = bh * renderScale;
var noiseOffsetX, noiseOffsetY ;
function init() {
randomiseField();
container = document.getElementById( 'container' );
//var w = window.innerWidth;
//var h = window.innerHeight;
// w = 6000;
// h = 6000;
// todo uncenter the camera
//camera = new THREE.OrthographicCamera( w / - 2, w / 2, h / 2, h / - 2, - 500, 500 );
camera = new THREE.OrthographicCamera( 0, w , h , 0, - 500, 500 );
// camera = new THREE.PerspectiveCamera( 50, window.innerWidth / window.innerHeight, 1, 10000 );
//camera.position.z = 20;
//camera.position.z = 50;
renderer = new THREE.WebGLRenderer({ antialias: true, preserveDrawingBuffer: true });
scene = new THREE.Scene();
// mouse orbit control
/*
controls = new THREE.OrbitControls( camera, renderer.domElement );
controls.enableDamping = true;
controls.dampingFactor = 0.25;
controls.enableZoom = false;*/
/*
controls = new THREE.TrackballControls(camera);
controls.rotateSpeed = 10.0;
controls.zoomSpeed = 10.2;
controls.panSpeed = 0.8;
controls.noZoom = false;
controls.noPan = false;
controls.staticMoving = true;
controls.dynamicDampingFactor = 0.3;
*/
if ( renderer.extensions.get( 'ANGLE_instanced_arrays' ) === false ) {
document.getElementById( "notSupported" ).style.display = "";
return;
}
renderer.setClearColor( 0xFFFFFF );
renderer.autoClear = false;
renderer.setPixelRatio( window.devicePixelRatio );
renderer.setSize( w,h);
var div = document.getElementById("canvasContainer");
div.appendChild(renderer.domElement );
// document.body.appendChild( renderer.domElement );
renderer.clear();
/*
stats = new Stats();
stats.domElement.style.position = 'absolute';
stats.domElement.style.top = '0px';
document.body.appendChild( stats.domElement );
*/
window.addEventListener( 'resize', onWindowResize, false );
renderer.domElement.addEventListener( 'mousemove', onMouseMove, true );
renderer.domElement.addEventListener( 'mousedown', onMouseDown, true );
renderer.domElement.addEventListener( 'mouseup', onMouseUp, true );
createGui();
reset();
}
var ismousedown =false;
var mousex = 0;
var mousey = 0;
document.addEventListener('keydown',onDocumentKeyDown,false);
function onDocumentKeyDown(event) {
console.log(event);
if(event.key == 's') {
//saveAsImage();
// savePixels();
saveCanvas();
}
if(event.key == " ")
{
        // next texture
textureIX = (textureIX + 1) % textureFilenames.length;
reset();
}
if(event.key == "r")
{
randomiseField();
// refresh noise field
reset();
}
}
var gui;
var originalPoints = [];
for(var i = 0; i< 4;++i)
{
originalPoints.push({"x":0, "y":0});
}
var points = originalPoints.slice(0);
var rectModel = {
xOffset: 0.5, //
yOffset: 0.5,
xScale: 1,
yScale: 1,
imageFilename: "image"
};
updatePoints(); // initial points should be a normalised rect
console.log(points);
//////////////////////////////////////////////////////////////////////////////////////////////
// colour map sampling options
var particleOptions = {
directionForward: true
}
function createGui()
{
gui = new dat.GUI();
// My sample abject
var obj = {
flipX: function() {flipX();},
flipY: function() {flipY();},
rotate: function(){rotate();},
resetPoints: function(){resetPoints();}
};
// Number field with slider
gui.add(rectModel, "xOffset").min(0).max(1).step(0.01).onChange(function(val) {
// console.log("changed " + val);
updatePoints();
}
).listen();
gui.add(rectModel, "yOffset").min(0).max(1).step(0.01).onChange(function(val) {
// console.log("changed " + val);
updatePoints();
}
).listen();
gui.add(rectModel, "xScale").min(0).max(1).step(0.01).onChange(function(val) {
// console.log("changed " + val);
updatePoints();
}
).listen();
gui.add(rectModel, "yScale").min(0).max(1).step(0.01).onChange(function(val) {
//console.log("changed " + val);
updatePoints();
}
).listen();
// Checkbox field
gui.add(obj, "flipX");
gui.add(obj, "flipY");
gui.add(obj, "rotate");
gui.add(obj, "resetPoints");
gui.add(rectModel, "imageFilename").listen();
gui.add(particleOptions, "directionForward").listen();
}
function flipX()
{
console.log("flip x");
var temp = points.slice(0);
points[0] = temp[1];
points[1] = temp[0];
points[2] = temp[3];
points[3] = temp[2];
console.log(points);
}
function flipY()
{
console.log("flipY");
var temp = points.slice(0);
points[0] = temp[3];
points[1] = temp[2];
points[2] = temp[1];
points[3] = temp[0];
console.log(points);
}
function rotate()
{
var temp = points.slice(0);
points[0] = temp[1];
points[1] = temp[2];
points[2] = temp[3];
points[3] = temp[0];
console.log(points);
}
function resetPoints()
{
rectModel.xOffset = 0.5;
rectModel.yOffset = 0.5;
rectModel.xScale = 1;
rectModel.yScale = 1;
updatePoints();
points = originalPoints.slice(0);
console.log(points);
}
function updatePoints()
{
var w = rectModel.xScale;
var h = rectModel.yScale;
var x = (rectModel.xOffset - 0.5)*(1-w) - 0.5*w +0.5;
var y = (rectModel.yOffset - 0.5)*(1-h) - 0.5*h + 0.5;
originalPoints[0].x = x;
originalPoints[0].y = y;
originalPoints[1].x = x + w;
originalPoints[1].y = y;
originalPoints[2].x = x + w;
originalPoints[2].y = y + h;
originalPoints[3].x = x ;
originalPoints[3].y = y + h;
console.log(originalPoints);
}
function getPoint(x,y)
{
var p0 = points[0];
var p1 = points[1];
var p2 = points[2];
var p3 = points[3];
var x0 = p0.x + x*(p1.x - p0.x);
var y0 = p0.y + x*(p1.y - p0.y);
    var x1 = p3.x + x*(p2.x - p3.x);
    var y1 = p3.y + x*(p2.y - p3.y);
var tx = x0 + y*(x1- x0);
var ty = y0 + y*(y1- y0);
return {"y":ty,"x":tx};
}
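// Sanity sketch of getPoint's bilinear mapping (values assume the default unit
// rectangle from resetPoints): getPoint(0, 0) returns points[0], getPoint(1, 0)
// returns points[1], and getPoint(0.5, 0.5) returns the centre of the quad.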
//////////////////////////////////////////////////////////////////////////////////////////////
function randomiseField()
{
noiseOffsetX = MathUtils.GetRandomFloat(0,100);
noiseOffsetY = MathUtils.GetRandomFloat(0,100);
}
function reset()
{
console.log("reset");
imageDataLoaded = false;
// clear
renderer.clear();
// choose a texture and load it
var loader = new THREE.TextureLoader();
loader.setPath('textures/');
var imageURL = textureFilenames[textureIX];
// var imageURL = 'grad.png';
console.log("imageURL "+ imageURL);
    rectModel.imageFilename = imageURL; // show filename for debugging
var _this = this;
var texture = loader.load(imageURL,
function ( texture ) {
// do something with the texture on complete
// console.log("texture", texture);
imagedata = getImageData(texture.image );
// console.log("imagedata", imagedata);
imageDataLoaded = true;
//test();
}
);
}
/////////////////////////////////////////////////////////////////////////////////////////////
function saveCanvas()
{
if(exportMode == ExportMode.png) {
renderer.domElement.toBlob(function(blob) {
saveAs(blob, "output" + MathUtils.GenerateUUID() + ".png");
});
}
else {
renderer.domElement.toBlob(function (blob) {
saveAs(blob, "output" + MathUtils.GenerateUUID() + ".jpg");
}, "image/jpeg");
}
}
////////////////////////////////////////////////
function getImageData( image ) {
var canvas = document.createElement( 'canvas' );
canvas.width = image.width;
canvas.height = image.height;
var context = canvas.getContext( '2d' );
context.drawImage( image, 0, 0 );
return context.getImageData( 0, 0, image.width, image.height );
}
function getPixel( imagedata, nx, ny ) {
var x = Math.floor( nx *(imagedata.width - 1));
var y = Math.floor( ny *(imagedata.height-1));
var position = ( x + imagedata.width * y ) * 4, data = imagedata.data;
return { r: data[ position ] /255.0, g: data[ position + 1 ]/255.0, b: data[ position + 2 ]/255.0, a: data[ position + 3 ]/255.0 };
}
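// For example, getPixel(imagedata, 0.5, 0.5) samples the centre pixel and
// returns its r/g/b/a channels normalised to [0, 1].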
var imagedata = null;
var imageDataLoaded = false;
//var preloader = new Preloader();
//load();
function load() {
//preloader.load(() => {
// this.scene.add(this.cube);
//this.render();
// var imgTexture = THREE.ImageUtils.loadTexture( "environment/floor.jpg" );
var loader = new THREE.TextureLoader();
loader.setPath('textures/');
var imageURL = '01.jpg';
// var imageURL = 'grad.png';
var _this = this;
var texture = loader.load(imageURL,
function (texture) {
// do something with the texture on complete
console.log("texture", texture);
imagedata = getImageData(texture.image);
console.log("imagedata", imagedata);
imageDataLoaded = true;
//test();
}
);
}
var field = new VectorField();
var p0s;
var p1s;
var p2s;
var q0s;
var q1s;
var q2s;
var colours0;
var colours1;
var startRs;
var endRs;
var nInstances;
var basepath;
var pathobj;
var ready = false;
var bufferix;
var grid;
function makeMeshObj()
{
//
basepath = new QCurve();
basepath.p1.x = 200;
basepath.p1.y = 0;
basepath.p2.x = 200;
basepath.p2.y = 100;
pathobj = new QCurveObj(basepath, 10);
// pathobj.addToScene(scene);
// geometry
    nInstances = 200000; // max number of instances that can be rendered in one go
bufferix = 0;
grid = new InstancedGrid();
grid.print();
var nx = 2; // keep this as 2
var nz = 5; // resolution
var zLen = 25;
//grid.createTube(nx,nz,1,1,zLen);
//grid.createRectTube(7,5,100,40);
grid.createFlatGrid(nx,nz,1,1);
grid.createIndices(nx,nz);
grid.createUVGrid(nx,nz);
// per instance data
// offsets = new THREE.InstancedBufferAttribute( new Float32Array( nInstances * 3 ), 3, 1 ).setDynamic( false );
p0s = new THREE.InstancedBufferAttribute( new Float32Array( nInstances * 3 ), 3, 1 ).setDynamic( true);
p1s = new THREE.InstancedBufferAttribute( new Float32Array( nInstances * 3 ), 3, 1 ).setDynamic( true);
p2s = new THREE.InstancedBufferAttribute( new Float32Array( nInstances * 3 ), 3, 1 ).setDynamic( true);
q0s = new THREE.InstancedBufferAttribute( new Float32Array( nInstances * 3 ), 3, 1 ).setDynamic( true);
q1s = new THREE.InstancedBufferAttribute( new Float32Array( nInstances * 3 ), 3, 1 ).setDynamic( true);
q2s = new THREE.InstancedBufferAttribute( new Float32Array( nInstances * 3 ), 3, 1 ).setDynamic( true);
colours0 = new THREE.InstancedBufferAttribute( new Float32Array( nInstances * 4 ), 4, 1 ).setDynamic( true);
colours1 = new THREE.InstancedBufferAttribute( new Float32Array( nInstances * 4 ), 4, 1 ).setDynamic( true);
// remove this
// startRs = new THREE.InstancedBufferAttribute( new Float32Array( nInstances * 1 ), 1, 1 ).setDynamic( true);
// endRs = new THREE.InstancedBufferAttribute( new Float32Array( nInstances * 1 ), 1, 1 ).setDynamic( true);
//grid.geometry.addAttribute( 'offset', offsets ); // per mesh translation
grid.geometry.addAttribute( 'p0', p0s);
grid.geometry.addAttribute( 'p1', p1s);
grid.geometry.addAttribute( 'p2', p2s);
grid.geometry.addAttribute( 'q0', q0s);
grid.geometry.addAttribute( 'q1', q1s);
grid.geometry.addAttribute( 'q2', q2s);
grid.geometry.addAttribute( 'colour0', colours0);
grid.geometry.addAttribute( 'colour1', colours1);
// grid.geometry.addAttribute( 'startR', startRs);
// grid.geometry.addAttribute( 'endR', endRs);
var material = new THREE.RawShaderMaterial( {
uniforms: {
//map: { type: "t", value: texture }
},
vertexShader: vertshader,
//fragmentShader: FragShader,
fragmentShader: fragshader,
//side: THREE.DoubleSide,
transparent: true,
// wireframe: true
} );
var mesh = new THREE.Mesh( grid.geometry, material );
mesh.frustumCulled = false;
//var zoom = 0.5;
// mesh.position.y = meshPositionY;
    mesh.scale.set(renderScale, renderScale, 1);
scene.add( mesh );
//add a test horizon line
//addTestLine();
ready = true;
// drawParticle();
}
function addTestLine()
{
var material = new THREE.LineBasicMaterial({
color: 0xff0000
});
var geometry = new THREE.Geometry();
geometry.vertices.push(
new THREE.Vector3( -1000*renderScale, 0, 0 ),
new THREE.Vector3( 1000*renderScale, 0, 0 )
);
var line = new THREE.Line( geometry, material );
scene.add( line );
}
Math.clamp = function(number, min, max) {
return Math.max(min, Math.min(number, max));
}
function onMouseMove(event){
mousex = (event.clientX);
mousey = (event.clientY);
console.log(mousex,mousey);
//mouseY = (event.clientY - window.innerHeight/2) / window.innerHeight/2;
}
function onMouseUp(event){
ismousedown = false;
console.log("onMouseUp");
}
function onMouseDown(event){
ismousedown = true;
console.log("onMouseDown");
nsteps = 20 + Math.random()*160;
}
var nsteps = 20;
function drawParticleUpdate()
{
if(ismousedown)
{
var n = 50;
var nx = mousex/w + Math.random()*0.02 ;
var ny = mousey/h + Math.random()*0.02 ;
        console.log(mousex/w, mousey/h);
var direction = particleOptions.directionForward ? 1: -1;// (Math.random() < 0.5)? -1 : 1;
var thickness = 0.5 + Math.random()*1.5;
var alpha = 0.3 + 0.7*Math.random();
for (var i = 0; i < n; ++i) {
drawParticle(nx,ny, direction, nsteps, thickness, alpha);
}
}
//drawRandomParticles(400);
}
function drawRandomParticles(n)
{
for (var i = 0; i < n; ++i) {
// particles are nomralised [0,1] -> remap to [-h,h]
var nx = Math.random()*0.99 ;
var ny = Math.random()*0.99 ;
var direction = (Math.random() < 0.5)? -1 : 1;
var thickness = 0.5 + Math.random()*1.5;
var nsteps = 30 + Math.random()*100;
var alpha = 0.3 + 0.7*Math.random();
drawParticle(nx,ny, direction, nsteps, thickness, alpha);
}
}
// draw particle at nx,ny
function drawParticle(nx,ny, direction, nsteps, thickness, alpha)
{
// todo use canvas coordinates
// convert to the emission bound
var canvasx = nx*bw; // stretch the width
var canvasy = bh - ny*( bottomy); // do
//get slight random position
var randomColPositionAmount= 0.01;
var colx = Math.clamp( MathUtils.GetRandomFloat(nx- randomColPositionAmount,nx + randomColPositionAmount) ,0,0.999);
var coly = Math.clamp( MathUtils.GetRandomFloat(ny- randomColPositionAmount,ny + randomColPositionAmount) ,0,0.999);
var transformedPoint = getPoint(colx,coly);
colx = transformedPoint.x;
coly = transformedPoint.y;
var col = getPixel(imagedata, colx,coly);
//var x =-1000+ nx*2000;
//var y =-450+ ny*950;
var particle;
// set a random seed
var seed = MathUtils.GetRandomIntBetween(0,100000);
// draw the shading (alpha black)
var brightness = 0.5;
    MathUtils.SetSeed(seed); // reset seed
particle = new Particle(field);
var thicknessShade = Math.min( thickness + 4, thickness *1.2);
particle.init( canvasx,canvasy, thicknessShade, direction);
particle.noiseOffsetX = noiseOffsetX;
particle.noiseOffsetY = noiseOffsetY;
particle.strokePath.colour = new THREE.Vector3(col.r*brightness,col.g*brightness,col.b*brightness);
particle.strokePath.alpha = alpha*0.2;
for(var i =0; i< nsteps;++i)
{
particle.update(thicknessShade);
}
bufferix = particle.strokePath.constructPath(p0s,p1s,p2s,q0s,q1s,q2s,colours0,colours1,bufferix);
// draw the colour
    MathUtils.SetSeed(seed); // reset seed
particle = new Particle(field);
particle.init(canvasx,canvasy, thickness, direction);
particle.noiseOffsetX = noiseOffsetX;
particle.noiseOffsetY = noiseOffsetY;
particle.strokePath.colour = new THREE.Vector3(col.r,col.g,col.b);
particle.strokePath.alpha =alpha;
for(var i =0; i< nsteps;++i)
{
particle.update(thickness);
}
bufferix = particle.strokePath.constructPath(p0s,p1s,p2s,q0s,q1s,q2s,colours0,colours1,bufferix);
/*
// test a couple of curves
var i = 0;
p0s.setXY(i, 0,0);
p1s.setXY(i, 102,0);
p2s.setXY(i, 202,25);
q0s.setXY(i, 0,0 + 50);
q1s.setXY(i, 102,0 + 50);
q2s.setXY(i, 202,25);
*/
}
function onWindowResize( event ) {
/*
camera.left = window.innerWidth / - 2;
camera.right = window.innerWidth / 2;
camera.top = window.innerHeight / 2;
camera.bottom = window.innerHeight / - 2;
camera.updateProjectionMatrix();
renderer.setSize( window.innerWidth, window.innerHeight );
*/
}
//
function animate() {
requestAnimationFrame( animate );
if(ready && imageDataLoaded) {
bufferix = 0;
// console.log("imageDataLoaded", imageDataLoaded);
drawParticleUpdate();
grid.setDrawCount(bufferix);
//console.log(bufferix);
// update
p0s.needsUpdate = true;
p1s.needsUpdate = true;
p2s.needsUpdate = true;
q0s.needsUpdate = true;
q1s.needsUpdate = true;
q2s.needsUpdate = true;
colours0.needsUpdate =true;
colours1.needsUpdate =true;
render();
}
// stats.update();
//controls.update(); // required if controls.enableDamping = true, or if controls.autoRotate = true
}
var lastTime = 0;
var moveQ = ( new THREE.Quaternion( .5, .5, .5, 0.0 ) ).normalize();
var tmpQ = new THREE.Quaternion();
var currentQ = new THREE.Quaternion();<|fim▁hole|>function render() {
var time = performance.now();
if(ready) {
//var object = scene.children[0];
var x;
var age;
var introDuration = 0.2;
var outroDuration = 0.2;
var r;
// endRs.needsUpdate = true;
// startRs.needsUpdate = true;
}
//renderer.autoClear = false;
renderer.render( scene, camera );
// pathobj.update();
lastTime = time;
}
init();
animate();<|fim▁end|> | |
<|file_name|>union.js<|end_file_name|><|fim▁begin|>/** @jsx jsx */
import { Editor } from 'slate'
import { jsx } from '../..'
export const input = (
<editor><|fim▁hole|> n
<mark key="b">
e<focus />
</mark>
</block>
</editor>
)
export const run = editor => {
return Array.from(Editor.activeMarks(editor, { union: true }))
}
export const output = [{ key: 'a' }, { key: 'b' }]<|fim▁end|> | <block>
<mark key="a">
<anchor />o
</mark> |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls.defaults import *
<|fim▁hole|>urlpatterns = patterns('transaction.views',
(r'^create/(?P<pid>.*)$', 'create'),
(r'^edit/(?P<tid>.*)$', 'edit'),
)<|fim▁end|> |