prompt (large_string, lengths 70-991k) | completion (large_string, lengths 0-1.02k)
---|---
<|file_name|>api.rs<|end_file_name|><|fim▁begin|>use std::collections::BTreeMap;
use anyhow::Result;
use lazy_static::lazy_static;
use super::WindowsEmulator;
pub enum CallingConvention {
Stdcall,
Cdecl,
}
pub struct ArgumentDescriptor {
pub ty: String,
pub name: String,
}
pub struct FunctionDescriptor {
pub calling_convention: CallingConvention,
pub return_type: String,
pub arguments: Vec<ArgumentDescriptor>,
}
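// A Hook receives the emulator and the descriptor of the API being
// intercepted; it emulates the call itself, including any calling-convention
// stack cleanup, before returning control to the caller.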
type Hook = Box<dyn Fn(&mut dyn WindowsEmulator, &FunctionDescriptor) -> Result<()> + Send + Sync>;
lazy_static! {
pub static ref API: BTreeMap<String, FunctionDescriptor> = {
let mut m = BTreeMap::new();
// populate from: https://github.com/microsoft/windows-rs/blob/master/.windows/winmd/Windows.Win32.winmd
// alternative source: https://github.com/vivisect/vivisect/blob/master/vivisect/impapi/windows/i386.py
// alternative source: https://github.com/fireeye/speakeasy/blob/88502c6eb99dd21ca6ebdcba3edff42c9c2c1bf8/speakeasy/winenv/api/usermode/kernel32.py#L1192
m.insert(
String::from("kernel32.dll!GetVersionExA"),
FunctionDescriptor {
calling_convention: CallingConvention::Stdcall,
return_type: String::from("bool"),
arguments: vec![
ArgumentDescriptor {
ty: String::from("LPOSVERSIONINFOA"),
name: String::from("lpVersionInformation"),
}
]
}
);
m
};
pub static ref HOOKS: BTreeMap<String, Hook> = {
let mut m = BTreeMap::new();
m.insert(
String::from("kernel32.dll!GetVersionExA"),
Box::new(
move |emu: &mut dyn WindowsEmulator, desc: &FunctionDescriptor| -> Result<()> {
let ra = emu.pop()?;
emu.set_pc(ra);
// this is 32-bit land
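// stdcall is callee-cleans: after consuming the return address we also
// pop one stack slot per declared argument. A cdecl caller cleans the
// stack itself, so nothing extra would be popped in that case.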
if let CallingConvention::Stdcall = desc.calling_convention {
for _ in 0..desc.arguments.len() {
let _ = emu.pop()?;
}
}
// TODO:
// this is 64-bit
// emu.inner.set_rax(0);
//emu.handle_return(0, desc)?;
Ok(())
}
) as Hook
);
m
};<|fim▁hole|><|fim▁end|>
|
}
|
<|file_name|>query-generator.test.js<|end_file_name|><|fim▁begin|>'use strict';
const chai = require('chai'),
expect = chai.expect,
Support = require(__dirname + '/../../support'),
DataTypes = require(__dirname + '/../../../../lib/data-types'),
dialect = Support.getTestDialect(),
_ = require('lodash'),
moment = require('moment'),
QueryGenerator = require('../../../../lib/dialects/sqlite/query-generator');
if (dialect === 'sqlite') {
describe('[SQLITE Specific] QueryGenerator', () => {
beforeEach(function() {
this.User = this.sequelize.define('User', {
username: DataTypes.STRING
});
return this.User.sync({ force: true });
});
const suites = {
arithmeticQuery: [
{
title:'Should use the plus operator',
arguments: ['+', 'myTable', { foo: 'bar' }, {}],
expectation: 'UPDATE `myTable` SET `foo`=`foo`+ \'bar\' '
},
{
title:'Should use the plus operator with where clause',
arguments: ['+', 'myTable', { foo: 'bar' }, { bar: 'biz'}],
expectation: 'UPDATE `myTable` SET `foo`=`foo`+ \'bar\' WHERE `bar` = \'biz\''
},
{
title:'Should use the minus operator',
arguments: ['-', 'myTable', { foo: 'bar' }],
expectation: 'UPDATE `myTable` SET `foo`=`foo`- \'bar\' '
},
{
title:'Should use the minus operator with negative value',
arguments: ['-', 'myTable', { foo: -1 }],
expectation: 'UPDATE `myTable` SET `foo`=`foo`- -1 '
},
{
title:'Should use the minus operator with where clause',
arguments: ['-', 'myTable', { foo: 'bar' }, { bar: 'biz'}],
expectation: 'UPDATE `myTable` SET `foo`=`foo`- \'bar\' WHERE `bar` = \'biz\''
}
],
attributesToSQL: [
{
arguments: [{id: 'INTEGER'}],
expectation: {id: 'INTEGER'}
},
{
arguments: [{id: 'INTEGER', foo: 'VARCHAR(255)'}],
expectation: {id: 'INTEGER', foo: 'VARCHAR(255)'}
},
{
arguments: [{id: {type: 'INTEGER'}}],
expectation: {id: 'INTEGER'}
},
{
arguments: [{id: {type: 'INTEGER', allowNull: false}}],
expectation: {id: 'INTEGER NOT NULL'}
},
{
arguments: [{id: {type: 'INTEGER', allowNull: true}}],
expectation: {id: 'INTEGER'}
},
{
arguments: [{id: {type: 'INTEGER', primaryKey: true, autoIncrement: true}}],
expectation: {id: 'INTEGER PRIMARY KEY AUTOINCREMENT'}
},
{
arguments: [{id: {type: 'INTEGER', defaultValue: 0}}],
expectation: {id: 'INTEGER DEFAULT 0'}
},
{
arguments: [{id: {type: 'INTEGER', defaultValue: undefined}}],
expectation: {id: 'INTEGER'}
},
{
arguments: [{id: {type: 'INTEGER', unique: true}}],
expectation: {id: 'INTEGER UNIQUE'}
},
// New references style
{
arguments: [{id: {type: 'INTEGER', references: { model: 'Bar' }}}],
expectation: {id: 'INTEGER REFERENCES `Bar` (`id`)'}
},
{
arguments: [{id: {type: 'INTEGER', references: { model: 'Bar', key: 'pk' }}}],
expectation: {id: 'INTEGER REFERENCES `Bar` (`pk`)'}
},
{
arguments: [{id: {type: 'INTEGER', references: { model: 'Bar' }, onDelete: 'CASCADE'}}],
expectation: {id: 'INTEGER REFERENCES `Bar` (`id`) ON DELETE CASCADE'}
},
{
arguments: [{id: {type: 'INTEGER', references: { model: 'Bar' }, onUpdate: 'RESTRICT'}}],
expectation: {id: 'INTEGER REFERENCES `Bar` (`id`) ON UPDATE RESTRICT'}
},
{
arguments: [{id: {type: 'INTEGER', allowNull: false, defaultValue: 1, references: { model: 'Bar' }, onDelete: 'CASCADE', onUpdate: 'RESTRICT'}}],
expectation: {id: 'INTEGER NOT NULL DEFAULT 1 REFERENCES `Bar` (`id`) ON DELETE CASCADE ON UPDATE RESTRICT'}
}
],
createTableQuery: [
{
arguments: ['myTable', {data: 'BLOB'}],
expectation: 'CREATE TABLE IF NOT EXISTS `myTable` (`data` BLOB);'
},
{
arguments: ['myTable', {data: 'LONGBLOB'}],
expectation: 'CREATE TABLE IF NOT EXISTS `myTable` (`data` LONGBLOB);'
},
{
arguments: ['myTable', {title: 'VARCHAR(255)', name: 'VARCHAR(255)'}],
expectation: 'CREATE TABLE IF NOT EXISTS `myTable` (`title` VARCHAR(255), `name` VARCHAR(255));'
},
{
arguments: ['myTable', {title: 'VARCHAR BINARY(255)', number: 'INTEGER(5) UNSIGNED PRIMARY KEY '}], // length and unsigned are not allowed on primary key
expectation: 'CREATE TABLE IF NOT EXISTS `myTable` (`title` VARCHAR BINARY(255), `number` INTEGER PRIMARY KEY);'
},
{
arguments: ['myTable', {title: 'ENUM("A", "B", "C")', name: 'VARCHAR(255)'}],
expectation: 'CREATE TABLE IF NOT EXISTS `myTable` (`title` ENUM(\"A\", \"B\", \"C\"), `name` VARCHAR(255));'
},
{
arguments: ['myTable', {title: 'VARCHAR(255)', name: 'VARCHAR(255)', id: 'INTEGER PRIMARY KEY'}],
expectation: 'CREATE TABLE IF NOT EXISTS `myTable` (`title` VARCHAR(255), `name` VARCHAR(255), `id` INTEGER PRIMARY KEY);'
},
{
arguments: ['myTable', {title: 'VARCHAR(255)', name: 'VARCHAR(255)', otherId: 'INTEGER REFERENCES `otherTable` (`id`) ON DELETE CASCADE ON UPDATE NO ACTION'}],
expectation: 'CREATE TABLE IF NOT EXISTS `myTable` (`title` VARCHAR(255), `name` VARCHAR(255), `otherId` INTEGER REFERENCES `otherTable` (`id`) ON DELETE CASCADE ON UPDATE NO ACTION);'
},
{
arguments: ['myTable', {id: 'INTEGER PRIMARY KEY AUTOINCREMENT', name: 'VARCHAR(255)'}],
expectation: 'CREATE TABLE IF NOT EXISTS `myTable` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `name` VARCHAR(255));'
},
{
arguments: ['myTable', {id: 'INTEGER PRIMARY KEY AUTOINCREMENT', name: 'VARCHAR(255)', surname: 'VARCHAR(255)'}, {uniqueKeys: {uniqueConstraint: {fields: ['name', 'surname']}}}],
expectation: 'CREATE TABLE IF NOT EXISTS `myTable` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `name` VARCHAR(255), `surname` VARCHAR(255), UNIQUE (`name`, `surname`));'
}
],
selectQuery: [
{
arguments: ['myTable'],
expectation: 'SELECT * FROM `myTable`;',
context: QueryGenerator
}, {
arguments: ['myTable', {attributes: ['id', 'name']}],
expectation: 'SELECT `id`, `name` FROM `myTable`;',
context: QueryGenerator
}, {
arguments: ['myTable', {where: {id: 2}}],
expectation: 'SELECT * FROM `myTable` WHERE `myTable`.`id` = 2;',
context: QueryGenerator
}, {
arguments: ['myTable', {where: {name: 'foo'}}],
expectation: "SELECT * FROM `myTable` WHERE `myTable`.`name` = 'foo';",
context: QueryGenerator
}, {
arguments: ['myTable', {where: {name: "foo';DROP TABLE myTable;"}}],
expectation: "SELECT * FROM `myTable` WHERE `myTable`.`name` = 'foo\'\';DROP TABLE myTable;';",
context: QueryGenerator
}, {
arguments: ['myTable', {where: 2}],
expectation: 'SELECT * FROM `myTable` WHERE `myTable`.`id` = 2;',
context: QueryGenerator
}, {
arguments: ['foo', { attributes: [['count(*)', 'count']] }],
expectation: 'SELECT count(*) AS `count` FROM `foo`;',
context: QueryGenerator
}, {
arguments: ['myTable', {order: ['id']}],
expectation: 'SELECT * FROM `myTable` ORDER BY `id`;',
context: QueryGenerator
}, {
arguments: ['myTable', {order: ['id', 'DESC']}],
expectation: 'SELECT * FROM `myTable` ORDER BY `id`, `DESC`;',
context: QueryGenerator
}, {
arguments: ['myTable', {order: ['myTable.id']}],
expectation: 'SELECT * FROM `myTable` ORDER BY `myTable`.`id`;',
context: QueryGenerator
}, {
arguments: ['myTable', {order: [['myTable.id', 'DESC']]}],
expectation: 'SELECT * FROM `myTable` ORDER BY `myTable`.`id` DESC;',
context: QueryGenerator
}, {
arguments: ['myTable', {order: [['id', 'DESC']]}, function(sequelize) {return sequelize.define('myTable', {});}],
expectation: 'SELECT * FROM `myTable` AS `myTable` ORDER BY `myTable`.`id` DESC;',
context: QueryGenerator,
needsSequelize: true
}, {
arguments: ['myTable', {order: [['id', 'DESC'], ['name']]}, function(sequelize) {return sequelize.define('myTable', {});}],
expectation: 'SELECT * FROM `myTable` AS `myTable` ORDER BY `myTable`.`id` DESC, `myTable`.`name`;',
context: QueryGenerator,
needsSequelize: true
}, {
title: 'sequelize.where with .fn as attribute and default comparator',
arguments: ['myTable', function(sequelize) {
return {
where: sequelize.and(
sequelize.where(sequelize.fn('LOWER', sequelize.col('user.name')), 'jan'),
{ type: 1 }
)
};
}],
expectation: "SELECT * FROM `myTable` WHERE (LOWER(`user`.`name`) = 'jan' AND `myTable`.`type` = 1);",
context: QueryGenerator,
needsSequelize: true
}, {
title: 'sequelize.where with .fn as attribute and LIKE comparator',
arguments: ['myTable', function(sequelize) {
return {
where: sequelize.and(
sequelize.where(sequelize.fn('LOWER', sequelize.col('user.name')), 'LIKE', '%t%'),
{ type: 1 }
)
};
}],
expectation: "SELECT * FROM `myTable` WHERE (LOWER(`user`.`name`) LIKE '%t%' AND `myTable`.`type` = 1);",
context: QueryGenerator,
needsSequelize: true
}, {
title: 'functions can take functions as arguments',
arguments: ['myTable', function(sequelize) {
return {
order: [[sequelize.fn('f1', sequelize.fn('f2', sequelize.col('id'))), 'DESC']]
};
}],
expectation: 'SELECT * FROM `myTable` ORDER BY f1(f2(`id`)) DESC;',
context: QueryGenerator,
needsSequelize: true
}, {
title: 'functions can take all types as arguments',
arguments: ['myTable', function(sequelize) {
return {
order: [
[sequelize.fn('f1', sequelize.col('myTable.id')), 'DESC'],
[sequelize.fn('f2', 12, 'lalala', new Date(Date.UTC(2011, 2, 27, 10, 1, 55))), 'ASC']
]
};
}],
expectation: "SELECT * FROM `myTable` ORDER BY f1(`myTable`.`id`) DESC, f2(12, 'lalala', '2011-03-27 10:01:55.000 +00:00') ASC;",
context: QueryGenerator,
needsSequelize: true
}, {
title: 'single string argument is not quoted',
arguments: ['myTable', {group: 'name'}],
expectation: 'SELECT * FROM `myTable` GROUP BY name;',
context: QueryGenerator
}, {
arguments: ['myTable', {group: ['name']}],
expectation: 'SELECT * FROM `myTable` GROUP BY `name`;',
context: QueryGenerator
}, {
title: 'functions work for group by',
arguments: ['myTable', function(sequelize) {
return {
group: [sequelize.fn('YEAR', sequelize.col('createdAt'))]
};
}],
expectation: 'SELECT * FROM `myTable` GROUP BY YEAR(`createdAt`);',
context: QueryGenerator,
needsSequelize: true
}, {
title: 'It is possible to mix sequelize.fn and string arguments to group by',
arguments: ['myTable', function(sequelize) {
return {
group: [sequelize.fn('YEAR', sequelize.col('createdAt')), 'title']
};
}],
expectation: 'SELECT * FROM `myTable` GROUP BY YEAR(`createdAt`), `title`;',
context: QueryGenerator,
needsSequelize: true
}, {
arguments: ['myTable', {group: ['name', 'title']}],
expectation: 'SELECT * FROM `myTable` GROUP BY `name`, `title`;',
context: QueryGenerator
}, {
arguments: ['myTable', {group: 'name', order: [['id', 'DESC']]}],
expectation: 'SELECT * FROM `myTable` GROUP BY name ORDER BY `id` DESC;',
context: QueryGenerator
}, {
title: 'HAVING clause works with where-like hash',
arguments: ['myTable', function(sequelize) {
return {
attributes: ['*', [sequelize.fn('YEAR', sequelize.col('createdAt')), 'creationYear']],
group: ['creationYear', 'title'],
having: { creationYear: { gt: 2002 } }
};
}],
expectation: 'SELECT *, YEAR(`createdAt`) AS `creationYear` FROM `myTable` GROUP BY `creationYear`, `title` HAVING `creationYear` > 2002;',
context: QueryGenerator,
needsSequelize: true
}, {
arguments: ['myTable', {limit: 10}],
expectation: 'SELECT * FROM `myTable` LIMIT 10;',
context: QueryGenerator
}, {
arguments: ['myTable', {limit: 10, offset: 2}],
expectation: 'SELECT * FROM `myTable` LIMIT 2, 10;',
context: QueryGenerator
}, {
title: 'uses default limit if only offset is specified',
arguments: ['myTable', {offset: 2}],
expectation: 'SELECT * FROM `myTable` LIMIT 2, 10000000000000;',
context: QueryGenerator
}, {
title: 'multiple where arguments',
arguments: ['myTable', {where: {boat: 'canoe', weather: 'cold'}}],
expectation: "SELECT * FROM `myTable` WHERE `myTable`.`boat` = 'canoe' AND `myTable`.`weather` = 'cold';",
context: QueryGenerator
}, {
title: 'no where arguments (object)',
arguments: ['myTable', {where: {}}],
expectation: 'SELECT * FROM `myTable`;',
context: QueryGenerator
}, {
title: 'no where arguments (string)',
arguments: ['myTable', {where: ['']}],
expectation: 'SELECT * FROM `myTable` WHERE 1=1;',
context: QueryGenerator
}, {
title: 'no where arguments (null)',
arguments: ['myTable', {where: null}],
expectation: 'SELECT * FROM `myTable`;',
context: QueryGenerator
}, {
title: 'buffer as where argument',
arguments: ['myTable', {where: { field: new Buffer('Sequelize')}}],
expectation: "SELECT * FROM `myTable` WHERE `myTable`.`field` = X'53657175656c697a65';",
context: QueryGenerator
}, {
title: 'use != if ne !== null',
arguments: ['myTable', {where: {field: {ne: 0}}}],
expectation: 'SELECT * FROM `myTable` WHERE `myTable`.`field` != 0;',
context: QueryGenerator
}, {
title: 'use IS NOT if ne === null',
arguments: ['myTable', {where: {field: {ne: null}}}],
expectation: 'SELECT * FROM `myTable` WHERE `myTable`.`field` IS NOT NULL;',
context: QueryGenerator
}, {
title: 'use IS NOT if not === BOOLEAN',
arguments: ['myTable', {where: {field: {not: true}}}],
expectation: 'SELECT * FROM `myTable` WHERE `myTable`.`field` IS NOT 1;',
context: QueryGenerator
}, {
title: 'use != if not !== BOOLEAN',
arguments: ['myTable', {where: {field: {not: 3}}}],
expectation: 'SELECT * FROM `myTable` WHERE `myTable`.`field` != 3;',
context: QueryGenerator
}
],
insertQuery: [
{
arguments: ['myTable', { name: 'foo' }],
expectation: "INSERT INTO `myTable` (`name`) VALUES ('foo');"
}, {
arguments: ['myTable', { name: "'bar'" }],
expectation: "INSERT INTO `myTable` (`name`) VALUES ('''bar''');"
}, {
arguments: ['myTable', {data: new Buffer('Sequelize') }],
expectation: "INSERT INTO `myTable` (`data`) VALUES (X'53657175656c697a65');"
}, {
arguments: ['myTable', { name: 'bar', value: null }],
expectation: "INSERT INTO `myTable` (`name`,`value`) VALUES ('bar',NULL);"
}, {
arguments: ['myTable', { name: 'bar', value: undefined }],
expectation: "INSERT INTO `myTable` (`name`,`value`) VALUES ('bar',NULL);"
}, {
arguments: ['myTable', {name: 'foo', birthday: moment('2011-03-27 10:01:55 +0000', 'YYYY-MM-DD HH:mm:ss Z').toDate()}],
expectation: "INSERT INTO `myTable` (`name`,`birthday`) VALUES ('foo','2011-03-27 10:01:55.000 +00:00');"
}, {
arguments: ['myTable', { name: 'foo', value: true }],
expectation: "INSERT INTO `myTable` (`name`,`value`) VALUES ('foo',1);"
}, {
arguments: ['myTable', { name: 'foo', value: false }],
expectation: "INSERT INTO `myTable` (`name`,`value`) VALUES ('foo',0);"
}, {
arguments: ['myTable', {name: 'foo', foo: 1, nullValue: null}],
expectation: "INSERT INTO `myTable` (`name`,`foo`,`nullValue`) VALUES ('foo',1,NULL);"
}, {
arguments: ['myTable', {name: 'foo', foo: 1, nullValue: null}],
expectation: "INSERT INTO `myTable` (`name`,`foo`,`nullValue`) VALUES ('foo',1,NULL);",
context: {options: {omitNull: false}}
}, {
arguments: ['myTable', {name: 'foo', foo: 1, nullValue: null}],
expectation: "INSERT INTO `myTable` (`name`,`foo`) VALUES ('foo',1);",
context: {options: {omitNull: true}}
}, {
arguments: ['myTable', {name: 'foo', foo: 1, nullValue: undefined}],
expectation: "INSERT INTO `myTable` (`name`,`foo`) VALUES ('foo',1);",
context: {options: {omitNull: true}}
}, {
arguments: ['myTable', function(sequelize) {
return {
foo: sequelize.fn('NOW')
};
}],
expectation: 'INSERT INTO `myTable` (`foo`) VALUES (NOW());',
needsSequelize: true
}
],
bulkInsertQuery: [
{
arguments: ['myTable', [{name: 'foo'}, {name: 'bar'}]],
expectation: "INSERT INTO `myTable` (`name`) VALUES ('foo'),('bar');"
}, {
arguments: ['myTable', [{name: "'bar'"}, {name: 'foo'}]],
expectation: "INSERT INTO `myTable` (`name`) VALUES ('''bar'''),('foo');"
}, {
arguments: ['myTable', [{name: 'foo', birthday: moment('2011-03-27 10:01:55 +0000', 'YYYY-MM-DD HH:mm:ss Z').toDate()}, {name: 'bar', birthday: moment('2012-03-27 10:01:55 +0000', 'YYYY-MM-DD HH:mm:ss Z').toDate()}]],
expectation: "INSERT INTO `myTable` (`name`,`birthday`) VALUES ('foo','2011-03-27 10:01:55.000 +00:00'),('bar','2012-03-27 10:01:55.000 +00:00');"
}, {
arguments: ['myTable', [{name: 'bar', value: null}, {name: 'foo', value: 1}]],
expectation: "INSERT INTO `myTable` (`name`,`value`) VALUES ('bar',NULL),('foo',1);"
}, {
arguments: ['myTable', [{name: 'bar', value: undefined}, {name: 'bar', value: 2}]],
expectation: "INSERT INTO `myTable` (`name`,`value`) VALUES ('bar',NULL),('bar',2);"
}, {
arguments: ['myTable', [{name: 'foo', value: true}, {name: 'bar', value: false}]],
expectation: "INSERT INTO `myTable` (`name`,`value`) VALUES ('foo',1),('bar',0);"
}, {
arguments: ['myTable', [{name: 'foo', value: false}, {name: 'bar', value: false}]],
expectation: "INSERT INTO `myTable` (`name`,`value`) VALUES ('foo',0),('bar',0);"
}, {
arguments: ['myTable', [{name: 'foo', foo: 1, nullValue: null}, {name: 'bar', foo: 2, nullValue: null}]],
expectation: "INSERT INTO `myTable` (`name`,`foo`,`nullValue`) VALUES ('foo',1,NULL),('bar',2,NULL);"
}, {
arguments: ['myTable', [{name: 'foo', foo: 1, nullValue: null}, {name: 'bar', foo: 2, nullValue: null}]],
expectation: "INSERT INTO `myTable` (`name`,`foo`,`nullValue`) VALUES ('foo',1,NULL),('bar',2,NULL);",
context: {options: {omitNull: false}}
}, {
arguments: ['myTable', [{name: 'foo', foo: 1, nullValue: null}, {name: 'bar', foo: 2, nullValue: null}]],
expectation: "INSERT INTO `myTable` (`name`,`foo`,`nullValue`) VALUES ('foo',1,NULL),('bar',2,NULL);",
context: {options: {omitNull: true}} // Note: We don't honour this because it makes little sense when some rows may have nulls and others not
}, {
arguments: ['myTable', [{name: 'foo', foo: 1, nullValue: null}, {name: 'bar', foo: 2, nullValue: null}]],
expectation: "INSERT INTO `myTable` (`name`,`foo`,`nullValue`) VALUES ('foo',1,NULL),('bar',2,NULL);",
context: {options: {omitNull: true}} // Note: As above
}, {
arguments: ['myTable', [{name: 'foo'}, {name: 'bar'}], {ignoreDuplicates: true}],
expectation: "INSERT OR IGNORE INTO `myTable` (`name`) VALUES ('foo'),('bar');"
}
],
updateQuery: [
{
arguments: ['myTable', {name: 'foo', birthday: moment('2011-03-27 10:01:55 +0000', 'YYYY-MM-DD HH:mm:ss Z').toDate()}, {id: 2}],
expectation: "UPDATE `myTable` SET `name`='foo',`birthday`='2011-03-27 10:01:55.000 +00:00' WHERE `id` = 2"
}, {
arguments: ['myTable', {name: 'foo', birthday: moment('2011-03-27 10:01:55 +0000', 'YYYY-MM-DD HH:mm:ss Z').toDate()}, {id: 2}],
expectation: "UPDATE `myTable` SET `name`='foo',`birthday`='2011-03-27 10:01:55.000 +00:00' WHERE `id` = 2"
}, {
arguments: ['myTable', { name: 'foo' }, { id: 2 }],
expectation: "UPDATE `myTable` SET `name`='foo' WHERE `id` = 2"
}, {
arguments: ['myTable', { name: "'bar'" }, { id: 2 }],
expectation: "UPDATE `myTable` SET `name`='''bar''' WHERE `id` = 2"
}, {
arguments: ['myTable', { name: 'bar', value: null }, { id: 2 }],
expectation: "UPDATE `myTable` SET `name`='bar',`value`=NULL WHERE `id` = 2"
}, {
arguments: ['myTable', { name: 'bar', value: undefined }, { id: 2 }],
expectation: "UPDATE `myTable` SET `name`='bar',`value`=NULL WHERE `id` = 2"
}, {
arguments: ['myTable', { flag: true }, { id: 2 }],
expectation: 'UPDATE `myTable` SET `flag`=1 WHERE `id` = 2'
}, {
arguments: ['myTable', { flag: false }, { id: 2 }],
expectation: 'UPDATE `myTable` SET `flag`=0 WHERE `id` = 2'
}, {
arguments: ['myTable', {bar: 2, nullValue: null}, {name: 'foo'}],
expectation: "UPDATE `myTable` SET `bar`=2,`nullValue`=NULL WHERE `name` = 'foo'"
}, {
arguments: ['myTable', {bar: 2, nullValue: null}, {name: 'foo'}],
expectation: "UPDATE `myTable` SET `bar`=2,`nullValue`=NULL WHERE `name` = 'foo'",
context: {options: {omitNull: false}}
}, {
arguments: ['myTable', {bar: 2, nullValue: null}, {name: 'foo'}],<|fim▁hole|> }, {
arguments: ['myTable', function(sequelize) {
return {
bar: sequelize.fn('NOW')
};
}, {name: 'foo'}],
expectation: "UPDATE `myTable` SET `bar`=NOW() WHERE `name` = 'foo'",
needsSequelize: true
}, {
arguments: ['myTable', function(sequelize) {
return {
bar: sequelize.col('foo')
};
}, {name: 'foo'}],
expectation: "UPDATE `myTable` SET `bar`=`foo` WHERE `name` = 'foo'",
needsSequelize: true
}
],
renameColumnQuery: [
{
title: 'Properly quotes column names',
arguments: ['myTable', 'foo', 'commit', {commit: 'VARCHAR(255)', bar: 'VARCHAR(255)'}],
expectation:
'CREATE TEMPORARY TABLE IF NOT EXISTS `myTable_backup` (`commit` VARCHAR(255), `bar` VARCHAR(255));' +
'INSERT INTO `myTable_backup` SELECT `foo` AS `commit`, `bar` FROM `myTable`;' +
'DROP TABLE `myTable`;' +
'CREATE TABLE IF NOT EXISTS `myTable` (`commit` VARCHAR(255), `bar` VARCHAR(255));' +
'INSERT INTO `myTable` SELECT `commit`, `bar` FROM `myTable_backup`;' +
'DROP TABLE `myTable_backup`;'
}
],
removeColumnQuery: [
{
title: 'Properly quotes column names',
arguments: ['myTable', {commit: 'VARCHAR(255)', bar: 'VARCHAR(255)'}],
expectation:
'CREATE TEMPORARY TABLE IF NOT EXISTS `myTable_backup` (`commit` VARCHAR(255), `bar` VARCHAR(255));' +
'INSERT INTO `myTable_backup` SELECT `commit`, `bar` FROM `myTable`;' +
'DROP TABLE `myTable`;' +
'CREATE TABLE IF NOT EXISTS `myTable` (`commit` VARCHAR(255), `bar` VARCHAR(255));' +
'INSERT INTO `myTable` SELECT `commit`, `bar` FROM `myTable_backup`;' +
'DROP TABLE `myTable_backup`;'
}
]
};
_.each(suites, (tests, suiteTitle) => {
describe(suiteTitle, () => {
tests.forEach(test => {
const title = test.title || 'SQLite correctly returns ' + test.expectation + ' for ' + JSON.stringify(test.arguments);
it(title, function() {
// Options would normally be set by the query interface that instantiates the query-generator, but here we specify it explicitly
const context = test.context || {options: {}};
if (test.needsSequelize) {
if (_.isFunction(test.arguments[1])) test.arguments[1] = test.arguments[1](this.sequelize);
if (_.isFunction(test.arguments[2])) test.arguments[2] = test.arguments[2](this.sequelize);
}
QueryGenerator.options = _.assign(context.options, { timezone: '+00:00' });
QueryGenerator._dialect = this.sequelize.dialect;
QueryGenerator.sequelize = this.sequelize;
const conditions = QueryGenerator[suiteTitle].apply(QueryGenerator, test.arguments);
expect(conditions).to.deep.equal(test.expectation);
});
});
});
});
});
}<|fim▁end|>
|
expectation: "UPDATE `myTable` SET `bar`=2 WHERE `name` = 'foo'",
context: {options: {omitNull: true}}
|
<|file_name|>handleprops.py<|end_file_name|><|fim▁begin|># GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
<|fim▁hole|># list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions to retrieve properties from a window handle
These are implemented in a procedural way so as to be
useful to other modules with the least conceptual overhead
"""
import warnings
import win32process
import win32api
import win32con
import win32gui
from ctypes import wintypes
from ctypes import WINFUNCTYPE
from ctypes import c_int
from ctypes import byref
from ctypes import sizeof
from ctypes import create_unicode_buffer
from . import win32functions
from . import win32defines
from . import win32structures
from .actionlogger import ActionLogger
#=========================================================================
def text(handle):
"""Return the text of the window"""
class_name = classname(handle)
if class_name == 'IME':
return 'Default IME'
if class_name == 'MSCTFIME UI':
return 'M'
if class_name is None:
return ''
#length = win32functions.SendMessage(handle, win32defines.WM_GETTEXTLENGTH, 0, 0)
# XXX: there are some very rare cases when WM_GETTEXTLENGTH hangs!
# WM_GETTEXTLENGTH may hang even for notepad.exe main window!
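# SendMessageTimeout with SMTO_ABORTIFHUNG gives up after 500 ms instead of
# blocking forever on a hung window, returning 0 on failure.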
c_length = win32structures.DWORD_PTR(0)
result = win32functions.SendMessageTimeout(
handle,
win32defines.WM_GETTEXTLENGTH,
0,
0,
win32defines.SMTO_ABORTIFHUNG,
500,
byref(c_length)
)
if result == 0:
ActionLogger().log('WARNING! Cannot retrieve text length for handle = ' + str(handle))
return ''
else:
length = c_length.value
textval = ''
# In some rare cases, the length returned by WM_GETTEXTLENGTH is <0.
# Guard against this by checking it is >0 (==0 is not of interest):
if length > 0:
length += 1
buffer_ = create_unicode_buffer(length)
ret = win32functions.SendMessage(
handle, win32defines.WM_GETTEXT, length, byref(buffer_))
if ret:
textval = buffer_.value
return textval
#=========================================================================
def classname(handle):
"""Return the class name of the window"""
if handle is None:
return None
class_name = create_unicode_buffer(u"", 257)
win32functions.GetClassName(handle, class_name, 256)
return class_name.value
#=========================================================================
def parent(handle):
"""Return the handle of the parent of the window"""
return win32functions.GetParent(handle)
#=========================================================================
def style(handle):
"""Return the style of the window"""
return win32functions.GetWindowLong(handle, win32defines.GWL_STYLE)
#=========================================================================
def exstyle(handle):
"""Return the extended style of the window"""
return win32functions.GetWindowLong(handle, win32defines.GWL_EXSTYLE)
#=========================================================================
def controlid(handle):
"""Return the ID of the control"""
return win32functions.GetWindowLong(handle, win32defines.GWL_ID)
#=========================================================================
def userdata(handle):
"""Return the value of any user data associated with the window"""
return win32functions.GetWindowLong(handle, win32defines.GWL_USERDATA)
#=========================================================================
def contexthelpid(handle):
"""Return the context help id of the window"""
return win32functions.GetWindowContextHelpId(handle)
#=========================================================================
def iswindow(handle):
"""Return True if the handle is a window"""
return False if handle is None else bool(win32functions.IsWindow(handle))
#=========================================================================
def isvisible(handle):
"""Return True if the window is visible"""
return False if handle is None else bool(win32functions.IsWindowVisible(handle))
#=========================================================================
def isunicode(handle):
"""Return True if the window is a Unicode window"""
return False if handle is None else bool(win32functions.IsWindowUnicode(handle))
#=========================================================================
def isenabled(handle):
"""Return True if the window is enabled"""
return False if handle is None else bool(win32functions.IsWindowEnabled(handle))
#=========================================================================
def is64bitprocess(process_id):
"""Return True if the specified process is a 64-bit process on x64
Return False if it is only a 32-bit process running under Wow64.
Always return False for x86.
"""
from .sysinfo import is_x64_OS
is32 = True
if is_x64_OS():
phndl = win32api.OpenProcess(win32con.MAXIMUM_ALLOWED, 0, process_id)
if phndl:
is32 = win32process.IsWow64Process(phndl)
#print("is64bitprocess, is32: %d, procid: %d" % (is32, process_id))
return (not is32)
#=========================================================================
def is64bitbinary(filename):
"""Check if the file is 64-bit binary"""
import win32file
try:
binary_type = win32file.GetBinaryType(filename)
return binary_type != win32file.SCS_32BIT_BINARY
except Exception as exc:
warnings.warn('Cannot get binary type for file "{}". Error: {}'
.format(filename, exc), RuntimeWarning, stacklevel=2)
return None
#=========================================================================
def clientrect(handle):
"""Return the client rectangle of the control"""
client_rect = win32structures.RECT()
win32functions.GetClientRect(handle, byref(client_rect))
return client_rect
#=========================================================================
def rectangle(handle):
"""Return the rectangle of the window"""
rect = win32structures.RECT()
win32functions.GetWindowRect(handle, byref(rect))
return rect
#=========================================================================
def font(handle):
"""Return the font as a LOGFONTW of the window"""
# get the font handle
if handle is None:
handle = 0 # make sure we don't pass window handle down as None
font_handle = win32functions.SendMessage(
handle, win32defines.WM_GETFONT, 0, 0)
# if the font handle is 0 then the control is using the
# system font (well probably not - even though that is what the docs say)
# instead we switch to the default GUI font - which is more likely correct.
if not font_handle:
# So just get the default system font
font_handle = win32functions.GetStockObject(win32defines.DEFAULT_GUI_FONT)
# if we still don't have a font!
# ----- ie, we're on an antiquated OS, like NT 3.51
if not font_handle:
# ----- On Asian platforms, ANSI font won't show.
if win32functions.GetSystemMetrics(win32defines.SM_DBCSENABLED):
# ----- was...(SYSTEM_FONT)
font_handle = win32functions.GetStockObject(
win32defines.SYSTEM_FONT)
else:
# ----- was...(SYSTEM_FONT)
font_handle = win32functions.GetStockObject(
win32defines.ANSI_VAR_FONT)
# Get the Logfont structure of the font of the control
fontval = win32structures.LOGFONTW()
ret = win32functions.GetObject(
font_handle, sizeof(fontval), byref(fontval))
# The function could not get the font - this is probably
# because the control does not have associated Font/Text
# So we should make sure the elements of the font are zeroed.
if not ret:
fontval = win32structures.LOGFONTW()
# if it is a main window
if is_toplevel_window(handle):
if "MS Shell Dlg" in fontval.lfFaceName or \
fontval.lfFaceName == "System":
# these are not usually the fonts actually used for
# title bars so we need to get the default title bar font
# get the title font based on the system metrics rather
# than the font of the control itself
ncms = win32structures.NONCLIENTMETRICSW()
ncms.cbSize = sizeof(ncms)
win32functions.SystemParametersInfo(
win32defines.SPI_GETNONCLIENTMETRICS,
sizeof(ncms),
byref(ncms),
0)
# with either of the following 2 flags set the font of the
# dialog is the small one (but there is normally no difference!)
if has_exstyle(handle, win32defines.WS_EX_TOOLWINDOW) or \
has_exstyle(handle, win32defines.WS_EX_PALETTEWINDOW):
fontval = ncms.lfSmCaptionFont
else:
fontval = ncms.lfCaptionFont
return fontval
#=========================================================================
def processid(handle):
"""Return the ID of process that controls this window"""
pid = wintypes.DWORD()
win32functions.GetWindowThreadProcessId(handle, byref(pid))
return pid.value
#=========================================================================
def has_enough_privileges(process_id):
"""Check if target process has enough rights to query GUI actions"""
try:
access_level = win32con.PROCESS_QUERY_INFORMATION | win32con.PROCESS_VM_READ
process_handle = win32api.OpenProcess(access_level, 0, process_id)
if process_handle:
win32api.CloseHandle(process_handle)
return True
return False
except win32gui.error:
return False
#=========================================================================
def children(handle):
"""Return a list of handles to the children of this window"""
# this will be filled in the callback function
child_windows = []
# callback function for EnumChildWindows
def enum_child_proc(hwnd, lparam):
"""Called for each child - adds child hwnd to list"""
# append it to our list
child_windows.append(hwnd)
# return true to keep going
return True
# define the child proc type
enum_child_proc_t = WINFUNCTYPE(
c_int, # return type
wintypes.HWND, # the window handle
wintypes.LPARAM) # extra information
# update the proc to the correct type
proc = enum_child_proc_t(enum_child_proc)
# loop over all the children (callback called for each)
win32functions.EnumChildWindows(handle, proc, 0)
return child_windows
#=========================================================================
def has_style(handle, tocheck):
"""Return True if the control has style tocheck"""
hwnd_style = style(handle)
return tocheck & hwnd_style == tocheck
#=========================================================================
def has_exstyle(handle, tocheck):
"""Return True if the control has extended style tocheck"""
hwnd_exstyle = exstyle(handle)
return tocheck & hwnd_exstyle == tocheck
#=========================================================================
def is_toplevel_window(handle):
"""Return whether the window is a top level window or not"""
# only request the style once - this is an optimization over calling
# style(handle) for each style I want to check!
style_ = style(handle)
if (style_ & win32defines.WS_OVERLAPPED == win32defines.WS_OVERLAPPED or
style_ & win32defines.WS_CAPTION == win32defines.WS_CAPTION) and \
not (style_ & win32defines.WS_CHILD == win32defines.WS_CHILD):
return True
else:
return False
#=========================================================================
def dumpwindow(handle):
"""Dump a window to a set of properties"""
props = {}
for func in (text,
classname,
rectangle,
clientrect,
style,
exstyle,
contexthelpid,
controlid,
userdata,
font,
parent,
processid,
isenabled,
isunicode,
isvisible,
children,
):
props[func.__name__] = func(handle)
return props<|fim▁end|>
| |
<|file_name|>linux.py<|end_file_name|><|fim▁begin|>from main import KeyboardHandler
import threading
import thread
import pyatspi
def parse(s):
"""parse a string like control+f into (modifier, key).
Unknown modifiers raise ValueError."""
m = 0
lst = s.split('+')
if not len(lst):
return (0, s)
# Are these right?
d = {
"shift": 1 << pyatspi.MODIFIER_SHIFT,
"control": 1 << pyatspi.MODIFIER_CONTROL,
"alt": 1 << pyatspi.MODIFIER_ALT,
"win": 1 << pyatspi.MODIFIER_META3,
}
for item in lst[:]:  # iterate over a copy; lst is mutated below
if item in d:
m |= d[item]
lst.remove(item)
# end if
if len(lst) > 1: # more than one key, parse error
raise ValueError('unknown modifier %s' % lst[0])
return (m, lst[0].lower())
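# e.g. parse('control+shift+f') -> (control_mask | shift_mask, 'f')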
class AtspiThread(threading.Thread):
def run(self):
pyatspi.Registry.registerKeystrokeListener(handler, kind=(<|fim▁hole|>
def handler(e):
m, k = e.modifiers, e.event_string.lower()
# not sure why we can't catch control+f. Try to fix it.
if (not e.is_text) and 97 <= e.id <= 126:  # keycodes for ASCII 'a'..'~'
k = chr(e.id)
if (m, k) not in keys:
return False
thread.start_new(keys[(m, k)], ())
return True # don't pass it on
class LinuxKeyboardHandler(KeyboardHandler):
def __init__(self, *args, **kwargs):
KeyboardHandler.__init__(self, *args, **kwargs)
t = AtspiThread()
t.start()
def register_key(self, key, function):
"""key will be a string, such as control+shift+f.
We need to convert that, using parse_key,
into modifier and key to put into our dictionary."""
# register key so we know if we have it on event receive.
t = parse(key)
keys[t] = function
# if we got this far, the key is valid.
KeyboardHandler.register_key(self, key, function)
def unregister_key(self, key, function):
KeyboardHandler.unregister_key(self, key, function)
del keys[parse(key)]<|fim▁end|>
|
pyatspi.KEY_PRESSED_EVENT,), mask=pyatspi.allModifiers())
pyatspi.Registry.start()
# the keys we registered
keys = {}
|
<|file_name|>htmlfontelement.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::RGBA;
use dom::attr::Attr;
use dom::bindings::codegen::Bindings::HTMLFontElementBinding;
use dom::bindings::codegen::Bindings::HTMLFontElementBinding::HTMLFontElementMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::root::{DomRoot, LayoutDom};
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::{Element, RawLayoutElementHelpers};
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use dom::virtualmethods::VirtualMethods;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
use servo_atoms::Atom;
use style::attr::AttrValue;
use style::str::{HTML_SPACE_CHARACTERS, read_numbers};
#[dom_struct]
pub struct HTMLFontElement {
htmlelement: HTMLElement,
}
impl HTMLFontElement {
fn new_inherited(local_name: LocalName, prefix: Option<Prefix>, document: &Document) -> HTMLFontElement {
HTMLFontElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName,
prefix: Option<Prefix>,
document: &Document) -> DomRoot<HTMLFontElement> {
Node::reflect_node(Box::new(HTMLFontElement::new_inherited(local_name, prefix, document)),
document,
HTMLFontElementBinding::Wrap)
}
}
impl HTMLFontElementMethods for HTMLFontElement {
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_getter!(Color, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-color
make_legacy_color_setter!(SetColor, "color");
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_getter!(Face, "face");
// https://html.spec.whatwg.org/multipage/#dom-font-face
make_atomic_setter!(SetFace, "face");
// https://html.spec.whatwg.org/multipage/#dom-font-size
make_getter!(Size, "size");
// https://html.spec.whatwg.org/multipage/#dom-font-size<|fim▁hole|> element.set_attribute(&local_name!("size"), parse_size(&value));
}
}
impl VirtualMethods for HTMLFontElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn attribute_affects_presentational_hints(&self, attr: &Attr) -> bool {
if attr.local_name() == &local_name!("color") {
return true;
}
// FIXME: Should also return true for `size` and `face` changes!
self.super_type().unwrap().attribute_affects_presentational_hints(attr)
}
fn parse_plain_attribute(&self, name: &LocalName, value: DOMString) -> AttrValue {
match name {
&local_name!("face") => AttrValue::from_atomic(value.into()),
&local_name!("color") => AttrValue::from_legacy_color(value.into()),
&local_name!("size") => parse_size(&value),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
pub trait HTMLFontElementLayoutHelpers {
fn get_color(&self) -> Option<RGBA>;
fn get_face(&self) -> Option<Atom>;
fn get_size(&self) -> Option<u32>;
}
impl HTMLFontElementLayoutHelpers for LayoutDom<HTMLFontElement> {
#[allow(unsafe_code)]
fn get_color(&self) -> Option<RGBA> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("color"))
.and_then(AttrValue::as_color)
.cloned()
}
}
#[allow(unsafe_code)]
fn get_face(&self) -> Option<Atom> {
unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("face"))
.map(AttrValue::as_atom)
.cloned()
}
}
#[allow(unsafe_code)]
fn get_size(&self) -> Option<u32> {
let size = unsafe {
(*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("size"))
};
match size {
Some(&AttrValue::UInt(_, s)) => Some(s),
_ => None,
}
}
}
/// <https://html.spec.whatwg.org/multipage/#rules-for-parsing-a-legacy-font-size>
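/// For example, "+2" is relative to the default size 3 and parses to 5,
/// "-1" parses to 2, and "4" is taken as an absolute size.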
fn parse_size(mut input: &str) -> AttrValue {
let original_input = input;
// Steps 1 & 2 are not relevant
// Step 3
input = input.trim_matches(HTML_SPACE_CHARACTERS);
enum ParseMode {
RelativePlus,
RelativeMinus,
Absolute,
}
let mut input_chars = input.chars().peekable();
let parse_mode = match input_chars.peek() {
// Step 4
None => return AttrValue::String(original_input.into()),
// Step 5
Some(&'+') => {
let _ = input_chars.next(); // consume the '+'
ParseMode::RelativePlus
}
Some(&'-') => {
let _ = input_chars.next(); // consume the '-'
ParseMode::RelativeMinus
}
Some(_) => ParseMode::Absolute,
};
// Steps 6, 7, 8
let mut value = match read_numbers(input_chars) {
(Some(v), _) if v >= 0 => v,
_ => return AttrValue::String(original_input.into()),
};
// Step 9
match parse_mode {
ParseMode::RelativePlus => value = 3 + value,
ParseMode::RelativeMinus => value = 3 - value,
ParseMode::Absolute => (),
}
// Steps 10, 11, 12
AttrValue::UInt(original_input.into(), value as u32)
}<|fim▁end|>
|
fn SetSize(&self, value: DOMString) {
let element = self.upcast::<Element>();
|
<|file_name|>appservice.py<|end_file_name|><|fim▁begin|># Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#<|fim▁hole|># See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
class AppServiceConfig(Config):
def read_config(self, config):
self.app_service_config_files = config.get("app_service_config_files", [])
def default_config(cls, **kwargs):
return """\
# A list of application service config file to use
app_service_config_files: []
"""<|fim▁end|>
|
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>"""mysite URL Configuration<|fim▁hole|>The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
urlpatterns = [
url(r'^', include('gsn.urls')),
]<|fim▁end|>
| |
<|file_name|>jquery.textillate.js<|end_file_name|><|fim▁begin|>/*
* textillate.js
* http://jschr.github.com/textillate
* MIT licensed
*
* Copyright (C) 2012-2013 Jordan Schroter
*/
(function ($) {
"use strict";
function isInEffect (effect) {
return /In/.test(effect) || $.inArray(effect, $.fn.textillate.defaults.inEffects) >= 0;
};
function isOutEffect (effect) {
return /Out/.test(effect) || $.inArray(effect, $.fn.textillate.defaults.outEffects) >= 0;
};
// custom get data api method
function getData (node) {
var attrs = node.attributes || []
, data = {};
if (!attrs.length) return data;
$.each(attrs, function (i, attr) {
if (/^data-in-*/.test(attr.nodeName)) {
data.in = data.in || {};
data.in[attr.nodeName.replace(/data-in-/, '')] = attr.nodeValue;
} else if (/^data-out-*/.test(attr.nodeName)) {
data.out = data.out || {};
data.out[attr.nodeName.replace(/data-out-/, '')] = attr.nodeValue;
} else if (/^data-*/.test(attr.nodeName)) {
data[attr.nodeName] = attr.nodeValue;
}
})
return data;
}
function shuffle (o) {
for (var j, x, i = o.length; i; j = parseInt(Math.random() * i), x = o[--i], o[i] = o[j], o[j] = x);
return o;
}
function animate ($c, effect, cb) {
$c.addClass('animated ' + effect)
.css('visibility', 'visible')
.show();
$c.one('animationend webkitAnimationEnd oAnimationEnd', function () {
$c.removeClass('animated ' + effect);
cb && cb();
});
}
function animateChars ($chars, options, cb) {
var that = this
, count = $chars.length;
if (!count) {
cb && cb();
return;
}
if (options.shuffle) shuffle($chars);
$chars.each(function (i) {
var $this = $(this);
function complete () {
if (isInEffect(options.effect)) {
$this.css('visibility', 'visible');
} else if (isOutEffect(options.effect)) {
$this.css('visibility', 'hidden');
}
count -= 1;
if (!count && cb) cb();
}
var delay = options.sync ? options.delay : options.delay * i * options.delayScale;
$this.text() ?
setTimeout(function () { animate($this, options.effect, complete) }, delay) :
complete();
})
};
var Textillate = function (element, options) {
var base = this
, $element = $(element);
base.init = function () {
base.$texts = $element.find(options.selector);
if (!base.$texts.length) {
base.$texts = $('<ul class="texts"><li>' + $element.html() + '</li></ul>');
$element.html(base.$texts);
}
base.$texts.hide();
base.$current = $('<span>')
.text(base.$texts.find(':first-child').html())
.prependTo($element);
if (isInEffect(options.effect)) {
base.$current.css('visibility', 'hidden');
} else if (isOutEffect(options.effect)) {<|fim▁hole|> base.$current.css('visibility', 'visible');
}
base.setOptions(options);
setTimeout(function () {
base.options.autoStart && base.start();
}, base.options.initialDelay)
};
base.setOptions = function (options) {
base.options = options;
};
base.start = function (index) {
var $next = base.$texts.find(':nth-child(' + (index || 1) + ')');
(function run ($elem) {
var options = $.extend({}, base.options, getData($elem));
base.$current
.text($elem.html())
.lettering('words');
base.$current.find('[class^="word"]')
.css({
'display': 'inline-block',
// fix for poor ios performance
'-webkit-transform': 'translate3d(0,0,0)',
'-moz-transform': 'translate3d(0,0,0)',
'-o-transform': 'translate3d(0,0,0)',
'transform': 'translate3d(0,0,0)'
})
.each(function () { $(this).lettering() });
var $chars = base.$current.find('[class^="char"]')
.css('display', 'inline-block');
if (isInEffect(options.in.effect)) {
$chars.css('visibility', 'hidden');
} else if (isOutEffect(options.in.effect)) {
$chars.css('visibility', 'visible');
}
animateChars($chars, options.in, function () {
setTimeout(function () {
// in case options have changed
var options = $.extend({}, base.options, getData($elem));
var $next = $elem.next();
if (base.options.loop && !$next.length) {
$next = base.$texts.find(':first-child');
}
if (!$next.length) return;
animateChars($chars, options.out, function () {
run($next)
});
}, base.options.minDisplayTime);
});
}($next));
};
base.init();
}
$.fn.textillate = function (settings, args) {
return this.each(function () {
var $this = $(this)
, data = $this.data('textillate')
, options = $.extend(true, {}, $.fn.textillate.defaults, getData(this), typeof settings == 'object' && settings);
if (!data) {
$this.data('textillate', (data = new Textillate(this, options)));
} else if (typeof settings == 'string') {
data[settings].apply(data, [].concat(args));
} else {
data.setOptions.call(data, options);
}
})
};
$.fn.textillate.defaults = {
selector: '.texts',
loop: false,
minDisplayTime: 2000,
initialDelay: 0,
in: {
effect: 'fadeInLeftBig',
delayScale: 1.5,
delay: 50,
sync: false,
shuffle: false
},
out: {
effect: 'hinge',
delayScale: 1.5,
delay: 50,
sync: false,
shuffle: false,
},
autoStart: true,
inEffects: [],
outEffects: [ 'hinge' ]
};
}(jQuery));<|fim▁end|>
| |
<|file_name|>admin.js<|end_file_name|><|fim▁begin|>jQuery(document).ready(function($){
$( "#remove_sp_league_menu_logo" ).click(function() {
$( ".sp-league-menu-logo-options" ).hide();
$("<input>").attr({
type: "hidden",
id: "sp_league_menu_logo_removed",<|fim▁hole|> name: "sp_league_menu_logo_removed"
}).appendTo( $(this).parent() );
});
$( "#sportspress_league_menu_logo_width" ).on( "input", function() {
$( ".sp-league-menu-logo-options img" ).css( "max-width", $(this).val() + 'px' );
});
$( "#sportspress_league_menu_logo_height" ).on( "input", function() {
$( ".sp-league-menu-logo-options img" ).css( "max-height", $(this).val() + 'px' );
});
});<|fim▁end|>
| |
<|file_name|>blackberry_pim_Attendee.js<|end_file_name|><|fim▁begin|>/*
* Copyright 2010-2011 Research In Motion Limited.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* <div><p>
* The Attendee object is an instance object; a new instance must be created using the new keyword.
* </p></div>
* @toc {PIM} Attendee
* @BB50+
* @class The Attendee object is used to represent a person who is invited to a calendar appointment.
* @featureID blackberry.pim.Attendee
* @constructor Constructor for a new Attendee object.
* @example
* <script type="text/javascript">
* // Create our Event
* var newAppt = new blackberry.pim.Appointment();
* newAppt.location = "Your office";
* newAppt.summary = "Talk about new project";
* newAppt.freeBusy = 0; // Free
*
* // Create our hour time slot
* var start = new Date();
* newAppt.start = start;
* var end = start.setHours(start.getHours() + 1);
* newAppt.end = end;
*
* // Create Attendee
* var attendees = [];
* var onlyAttendee = new blackberry.pim.Attendee();
* onlyAttendee.address = "[email protected]";
* onlyAttendee.type = blackberry.pim.Attendee.INVITED;
* attendees.push(onlyAttendee);
*
* newAppt.attendees = attendees;
* newAppt.save();
* </script>
*/
blackberry.pim.Attendee = function() { };
/**
* Event organizer
* @type Number
* @constant
* @BB50+
*/
blackberry.pim.Attendee.ORGANIZER = 0;
/**
* Attendee has been invited.
* @type Number
* @constant
* @BB50+
*/
blackberry.pim.Attendee.INVITED = 1;
/**
* Attendee has accepted the invitation.
* @type Number
* @constant
* @BB50+
*/
blackberry.pim.Attendee.ACCEPTED = 2;
/**
* Attendee has declined the invitation.
* @type Number
* @constant
* @BB50+
<|fim▁hole|>* Attendee has tentatively accepted the invitation.
* @type Number
* @constant
* @BB50+
*/
blackberry.pim.Attendee.TENTATIVE = 4;
/**
* Indicates the type of a particular attendee. Value can be one of the Attendee types.
* @type Number
* @BB50+
*/
blackberry.pim.Attendee.prototype.type = { };
/**
* Contains the email address of a particular attendee.
* @type String
* @BB50+
*/
blackberry.pim.Attendee.prototype.address = { };<|fim▁end|>
|
*/
blackberry.pim.Attendee.DECLINED = 3;
/**
|
<|file_name|>associationTestSuite.py<|end_file_name|><|fim▁begin|>import unittest
from TASSELpy.TASSELbridge import TASSELbridge
from TASSELpy.test.net.maizegenetics.analysis.association.FixedEffectLMPlugin import easy_GLMTest
class associationTestSuite(unittest.TestSuite):
def __init__(self):
super(associationTestSuite, self).__init__()<|fim▁hole|> runner.run(associationTestSuite())
TASSELbridge.stop()<|fim▁end|>
|
self.addTest(unittest.makeSuite(easy_GLMTest))
if __name__ == "__main__":
runner = unittest.TextTestRunner()
|
<|file_name|>FlagsSeries.js<|end_file_name|><|fim▁begin|>/* ****************************************************************************
* Start Flags series code *
*****************************************************************************/
var symbols = SVGRenderer.prototype.symbols;
// 1 - set default options
defaultPlotOptions.flags = merge(defaultPlotOptions.column, {
dataGrouping: null,
fillColor: 'white',
lineWidth: 1,
pointRange: 0, // #673
//radius: 2,
shape: 'flag',
stackDistance: 12,
states: {
hover: {
lineColor: 'black',
fillColor: '#FCFFC5'
}
},
style: {
fontSize: '11px',
fontWeight: 'bold',
textAlign: 'center'
},
tooltip: {
pointFormat: '{point.text}<br/>'
},
threshold: null,
y: -30
});
// 2 - Create the FlagsSeries object
seriesTypes.flags = extendClass(seriesTypes.column, {
type: 'flags',
sorted: false,
noSharedTooltip: true,
takeOrdinalPosition: false, // #1074
trackerGroups: ['markerGroup'],
forceCrop: true,
/**
* Inherit the initialization from base Series
*/
init: Series.prototype.init,
/**
* One-to-one mapping from options to SVG attributes
*/
pointAttrToOptions: { // mapping between SVG attributes and the corresponding options
fill: 'fillColor',
stroke: 'color',
'stroke-width': 'lineWidth',
r: 'radius'
},
/**
* Extend the translate method by placing the point on the related series
*/
translate: function () {
seriesTypes.column.prototype.translate.apply(this);
var series = this,
options = series.options,
chart = series.chart,
points = series.points,
cursor = points.length - 1,
point,
lastPoint,
optionsOnSeries = options.onSeries,
onSeries = optionsOnSeries && chart.get(optionsOnSeries),
step = onSeries && onSeries.options.step,
onData = onSeries && onSeries.points,
i = onData && onData.length,
xAxis = series.xAxis,
xAxisExt = xAxis.getExtremes(),
leftPoint,
lastX,
rightPoint,
currentDataGrouping;
// relate to a master series
if (onSeries && onSeries.visible && i) {
currentDataGrouping = onSeries.currentDataGrouping;
lastX = onData[i - 1].x + (currentDataGrouping ? currentDataGrouping.totalRange : 0); // #2374
// sort the data points
points.sort(function (a, b) {
return (a.x - b.x);
});
while (i-- && points[cursor]) {
point = points[cursor];
leftPoint = onData[i];
if (leftPoint.x <= point.x && leftPoint.plotY !== UNDEFINED) {
if (point.x <= lastX) { // #803
point.plotY = leftPoint.plotY;<|fim▁hole|> // interpolate between points, #666
if (leftPoint.x < point.x && !step) {
rightPoint = onData[i + 1];
if (rightPoint && rightPoint.plotY !== UNDEFINED) {
point.plotY +=
((point.x - leftPoint.x) / (rightPoint.x - leftPoint.x)) * // the distance ratio, between 0 and 1
(rightPoint.plotY - leftPoint.plotY); // the y distance
}
}
}
cursor--;
i++; // check again for points in the same x position
if (cursor < 0) {
break;
}
}
}
}
// Add plotY position and handle stacking
each(points, function (point, i) {
// Undefined plotY means the point is either on axis, outside series range or hidden series.
// If the series is outside the range of the x axis it should fall through with
// an undefined plotY, but then we must remove the shapeArgs (#847).
if (point.plotY === UNDEFINED) {
if (point.x >= xAxisExt.min && point.x <= xAxisExt.max) { // we're inside xAxis range
point.plotY = chart.chartHeight - xAxis.bottom - (xAxis.opposite ? xAxis.height : 0) + xAxis.offset - chart.plotTop;
} else {
point.shapeArgs = {}; // 847
}
}
// if multiple flags appear at the same x, order them into a stack
lastPoint = points[i - 1];
if (lastPoint && lastPoint.plotX === point.plotX) {
if (lastPoint.stackIndex === UNDEFINED) {
lastPoint.stackIndex = 0;
}
point.stackIndex = lastPoint.stackIndex + 1;
}
});
},
/**
* Draw the markers
*/
drawPoints: function () {
var series = this,
pointAttr,
points = series.points,
chart = series.chart,
renderer = chart.renderer,
plotX,
plotY,
options = series.options,
optionsY = options.y,
shape,
i,
point,
graphic,
stackIndex,
crisp = (options.lineWidth % 2 / 2),
anchorX,
anchorY,
outsideRight;
i = points.length;
while (i--) {
point = points[i];
outsideRight = point.plotX > series.xAxis.len;
plotX = point.plotX + (outsideRight ? crisp : -crisp);
stackIndex = point.stackIndex;
shape = point.options.shape || options.shape;
plotY = point.plotY;
if (plotY !== UNDEFINED) {
plotY = point.plotY + optionsY + crisp - (stackIndex !== UNDEFINED && stackIndex * options.stackDistance);
}
anchorX = stackIndex ? UNDEFINED : point.plotX + crisp; // skip connectors for higher level stacked points
anchorY = stackIndex ? UNDEFINED : point.plotY;
graphic = point.graphic;
// only draw the point if y is defined and the flag is within the visible area
if (plotY !== UNDEFINED && plotX >= 0 && !outsideRight) {
// shortcuts
pointAttr = point.pointAttr[point.selected ? 'select' : ''];
if (graphic) { // update
graphic.attr({
x: plotX,
y: plotY,
r: pointAttr.r,
anchorX: anchorX,
anchorY: anchorY
});
} else {
graphic = point.graphic = renderer.label(
point.options.title || options.title || 'A',
plotX,
plotY,
shape,
anchorX,
anchorY,
options.useHTML
)
.css(merge(options.style, point.style))
.attr(pointAttr)
.attr({
align: shape === 'flag' ? 'left' : 'center',
width: options.width,
height: options.height
})
.add(series.markerGroup)
.shadow(options.shadow);
}
// Set the tooltip anchor position
point.tooltipPos = [plotX, plotY];
} else if (graphic) {
point.graphic = graphic.destroy();
}
}
},
/**
* Extend the column trackers with listeners to expand and contract stacks
*/
drawTracker: function () {
var series = this,
points = series.points;
TrackerMixin.drawTrackerPoint.apply(this);
// Bring each stacked flag up on mouse over, this allows readability of vertically
// stacked elements as well as tight points on the x axis. #1924.
each(points, function (point) {
var graphic = point.graphic;
if (graphic) {
addEvent(graphic.element, 'mouseover', function () {
// Raise this point
if (point.stackIndex > 0 && !point.raised) {
point._y = graphic.y;
graphic.attr({
y: point._y - 8
});
point.raised = true;
}
// Revert other raised points
each(points, function (otherPoint) {
if (otherPoint !== point && otherPoint.raised && otherPoint.graphic) {
otherPoint.graphic.attr({
y: otherPoint._y
});
otherPoint.raised = false;
}
});
});
}
});
},
/**
* Disable animation
*/
animate: noop
});
// create the flag icon with anchor
symbols.flag = function (x, y, w, h, options) {
var anchorX = (options && options.anchorX) || x,
anchorY = (options && options.anchorY) || y;
return [
'M', anchorX, anchorY,
'L', x, y + h,
x, y,
x + w, y,
x + w, y + h,
x, y + h,
'M', anchorX, anchorY,
'Z'
];
};
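// For instance, symbols.flag(0, 0, 20, 10, { anchorX: 5, anchorY: 30 }) returns
// ['M', 5, 30, 'L', 0, 10, 0, 0, 20, 0, 20, 10, 0, 10, 'M', 5, 30, 'Z'],
// i.e. a line from the anchor up to the label rectangle, then its outline.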
// create the circlepin and squarepin icons with anchor
each(['circle', 'square'], function (shape) {
symbols[shape + 'pin'] = function (x, y, w, h, options) {
var anchorX = options && options.anchorX,
anchorY = options && options.anchorY,
path = symbols[shape](x, y, w, h),
labelTopOrBottomY;
if (anchorX && anchorY) {
// if the label is below the anchor, draw the connecting line from the top edge of the label
// otherwise start drawing from the bottom edge
labelTopOrBottomY = (y > anchorY) ? y : y + h;
path.push('M', anchorX, labelTopOrBottomY, 'L', anchorX, anchorY);
}
return path;
};
});
// The symbol callbacks are generated on the SVGRenderer object in all browsers. Even
// VML browsers need this in order to generate shapes in export. Now share
// them with the VMLRenderer.
if (Renderer === Highcharts.VMLRenderer) {
each(['flag', 'circlepin', 'squarepin'], function (shape) {
VMLRenderer.prototype.symbols[shape] = symbols[shape];
});
}
/* ****************************************************************************
* End Flags series code *
*****************************************************************************/<|fim▁end|>
| |
<|file_name|>LanguageIDE.java<|end_file_name|><|fim▁begin|>package com.github.takezoe.xlsbeans;
import com.github.takezoe.xlsbeans.annotation.Column;
import com.github.takezoe.xlsbeans.annotation.MapColumns;
import java.util.Map;
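// Maps one row of an Excel sheet: the "Name" column is bound to `name`, and
// (as the annotations below suggest) every column after "Name" is collected
// into the `attributes` map as header -> cell value. The exact sheet layout
// is an assumption, not defined in this file.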
public class LanguageIDE {
private String name;
private Map<String, String> attributes;
public Map<String, String> getAttributes() {
return attributes;
}
@MapColumns(previousColumnName = "Name")
public void setAttributes(Map<String, String> attributes) {
this.attributes = attributes;
<|fim▁hole|> public String getName() {
return name;
}
@Column(columnName = "Name")
public void setName(String name) {
this.name = name;
}
}<|fim▁end|>
|
}
|
<|file_name|>test_infofile.py<|end_file_name|><|fim▁begin|># Copyright (C) 2013-2016 2ndQuadrant Italia Srl
#
# This file is part of Barman.
#
# Barman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Barman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Barman. If not, see <http://www.gnu.org/licenses/>.
import json
import os
from datetime import datetime
import mock
import pytest
from dateutil.tz import tzlocal, tzoffset
from barman.infofile import (BackupInfo, Field, FieldListFile, WalFileInfo,
load_datetime_tz)
from testing_helpers import build_mocked_server
BASE_BACKUP_INFO = """backup_label=None
begin_offset=40
begin_time=2014-12-22 09:25:22.561207+01:00
begin_wal=000000010000000000000004
begin_xlog=0/4000028
config_file=/fakepath/postgresql.conf
end_offset=184
end_time=2014-12-22 09:25:27.410470+01:00
end_wal=000000010000000000000004
end_xlog=0/40000B8
error=None
hba_file=/fakepath/pg_hba.conf
ident_file=/fakepath/pg_ident.conf
mode=default
pgdata=/fakepath/data
server_name=fake-9.4-server
size=20935690
status=DONE
tablespaces=[('fake_tbs', 16384, '/fake_tmp/tbs')]
timeline=1
version=90400"""
def test_load_datetime_tz():
"""
Unit test for load_datetime_tz function
This test covers all load_datetime_tz code with correct parameters
and checks that a ValueError is raised when called with a bad parameter.
"""
# try to load a tz-less timestamp
assert load_datetime_tz("2012-12-15 10:14:51.898000") == \
datetime(2012, 12, 15, 10, 14, 51, 898000,
tzinfo=tzlocal())
# try to load a tz-aware timestamp
assert load_datetime_tz("2012-12-15 10:14:51.898000 +0100") == \
datetime(2012, 12, 15, 10, 14, 51, 898000,
tzinfo=tzoffset('GMT+1', 3600))
# try to load an incorrect date
with pytest.raises(ValueError):
load_datetime_tz("Invalid datetime")
# noinspection PyMethodMayBeStatic
class TestField(object):
def test_field_creation(self):
field = Field('test_field')
assert field
def test_field_with_arguments(self):
dump_function = str
load_function = int
default = 10
docstring = 'Test Docstring'
field = Field('test_field', dump_function, load_function, default,
docstring)
assert field
assert field.name == 'test_field'
assert field.to_str == dump_function
assert field.from_str == load_function
assert field.default == default
assert field.__doc__ == docstring
def test_field_dump_decorator(self):
test_field = Field('test_field')
dump_function = str
test_field = test_field.dump(dump_function)
assert test_field.to_str == dump_function
def test_field_load_decorator(self):
test_field = Field('test_field')
load_function = int
test_field = test_field.dump(load_function)
assert test_field.to_str == load_function
class DummyFieldListFile(FieldListFile):
dummy = Field('dummy', dump=str, load=int, default=12, doc='dummy_field')
# noinspection PyMethodMayBeStatic
class TestFieldListFile(object):
def test_field_list_file_creation(self):
with pytest.raises(AttributeError):
FieldListFile(test_argument=11)
field = FieldListFile()
assert field
def test_subclass_creation(self):
with pytest.raises(AttributeError):
DummyFieldListFile(test_argument=11)
field = DummyFieldListFile()
assert field
assert field.dummy == 12
field = DummyFieldListFile(dummy=13)
assert field
assert field.dummy == 13
def test_subclass_access(self):
dummy = DummyFieldListFile()
dummy.dummy = 14
assert dummy.dummy == 14
with pytest.raises(AttributeError):
del dummy.dummy
def test_subclass_load(self, tmpdir):
tmp_file = tmpdir.join("test_file")
tmp_file.write('dummy=15\n')
dummy = DummyFieldListFile()
dummy.load(tmp_file.strpath)
assert dummy.dummy == 15
def test_subclass_save(self, tmpdir):
tmp_file = tmpdir.join("test_file")
dummy = DummyFieldListFile(dummy=16)
dummy.save(tmp_file.strpath)
assert 'dummy=16' in tmp_file.read()
def test_subclass_from_meta_file(self, tmpdir):
tmp_file = tmpdir.join("test_file")
tmp_file.write('dummy=17\n')
dummy = DummyFieldListFile.from_meta_file(tmp_file.strpath)
assert dummy.dummy == 17
def test_subclass_items(self):
dummy = DummyFieldListFile()
dummy.dummy = 18
assert list(dummy.items()) == [('dummy', '18')]
def test_subclass_repr(self):
dummy = DummyFieldListFile()
dummy.dummy = 18
assert repr(dummy) == "DummyFieldListFile(dummy='18')"
# noinspection PyMethodMayBeStatic
class TestWalFileInfo(object):
def test_from_file_no_compression(self, tmpdir):
tmp_file = tmpdir.join("000000000000000000000001")
tmp_file.write('dummy_content\n')
stat = os.stat(tmp_file.strpath)
wfile_info = WalFileInfo.from_file(tmp_file.strpath)
assert wfile_info.name == tmp_file.basename
assert wfile_info.size == stat.st_size
assert wfile_info.time == stat.st_mtime
assert wfile_info.filename == '%s.meta' % tmp_file.strpath
assert wfile_info.relpath() == (
'0000000000000000/000000000000000000000001')
@mock.patch('barman.infofile.identify_compression')
def test_from_file_compression(self, id_compression, tmpdir):
# prepare
id_compression.return_value = 'test_compression'
tmp_file = tmpdir.join("000000000000000000000001")
tmp_file.write('dummy_content\n')
wfile_info = WalFileInfo.from_file(tmp_file.strpath)
assert wfile_info.name == tmp_file.basename
assert wfile_info.size == tmp_file.size()
assert wfile_info.time == tmp_file.mtime()
assert wfile_info.filename == '%s.meta' % tmp_file.strpath
assert wfile_info.compression == 'test_compression'
assert wfile_info.relpath() == (
'0000000000000000/000000000000000000000001')
@mock.patch('barman.infofile.identify_compression')
def test_from_file_default_compression(self, id_compression, tmpdir):
# prepare
id_compression.return_value = None
tmp_file = tmpdir.join("00000001000000E500000064")
tmp_file.write('dummy_content\n')
wfile_info = WalFileInfo.from_file(
tmp_file.strpath,
default_compression='test_default_compression')
assert wfile_info.name == tmp_file.basename
assert wfile_info.size == tmp_file.size()
assert wfile_info.time == tmp_file.mtime()
assert wfile_info.filename == '%s.meta' % tmp_file.strpath
assert wfile_info.compression == 'test_default_compression'
assert wfile_info.relpath() == (
'00000001000000E5/00000001000000E500000064')
@mock.patch('barman.infofile.identify_compression')
def test_from_file_override_compression(self, id_compression, tmpdir):
# prepare
id_compression.return_value = None
tmp_file = tmpdir.join("000000000000000000000001")
tmp_file.write('dummy_content\n')
wfile_info = WalFileInfo.from_file(
tmp_file.strpath,
default_compression='test_default_compression',
compression='test_override_compression')
assert wfile_info.name == tmp_file.basename
assert wfile_info.size == tmp_file.size()
assert wfile_info.time == tmp_file.mtime()
assert wfile_info.filename == '%s.meta' % tmp_file.strpath
assert wfile_info.compression == 'test_override_compression'
assert wfile_info.relpath() == (
'0000000000000000/000000000000000000000001')
@mock.patch('barman.infofile.identify_compression')
def test_from_file_override(self, id_compression, tmpdir):
# prepare
id_compression.return_value = None
tmp_file = tmpdir.join("000000000000000000000001")
tmp_file.write('dummy_content\n')
wfile_info = WalFileInfo.from_file(
tmp_file.strpath,
name="000000000000000000000002")
assert wfile_info.name == '000000000000000000000002'
assert wfile_info.size == tmp_file.size()
assert wfile_info.time == tmp_file.mtime()
assert wfile_info.filename == '%s.meta' % tmp_file.strpath
assert wfile_info.compression is None
assert wfile_info.relpath() == (
'0000000000000000/000000000000000000000002')
wfile_info = WalFileInfo.from_file(
tmp_file.strpath,
size=42)
assert wfile_info.name == tmp_file.basename
assert wfile_info.size == 42
assert wfile_info.time == tmp_file.mtime()
assert wfile_info.filename == '%s.meta' % tmp_file.strpath
assert wfile_info.compression is None
assert wfile_info.relpath() == (
'0000000000000000/000000000000000000000001')
wfile_info = WalFileInfo.from_file(
tmp_file.strpath,
time=43)
assert wfile_info.name == tmp_file.basename
assert wfile_info.size == tmp_file.size()
assert wfile_info.time == 43
assert wfile_info.filename == '%s.meta' % tmp_file.strpath
assert wfile_info.compression is None
assert wfile_info.relpath() == (
'0000000000000000/000000000000000000000001')
def test_to_xlogdb_line(self):
wfile_info = WalFileInfo()
wfile_info.name = '000000000000000000000002'
wfile_info.size = 42
wfile_info.time = 43
wfile_info.compression = None
assert wfile_info.relpath() == (
'0000000000000000/000000000000000000000002')
assert wfile_info.to_xlogdb_line() == (
'000000000000000000000002\t42\t43\tNone\n')
def test_from_xlogdb_line(self):
"""
Test the conversion from a string to a WalFileInfo file
"""
# build a WalFileInfo object
wfile_info = WalFileInfo()
wfile_info.name = '000000000000000000000001'
wfile_info.size = 42
wfile_info.time = 43
wfile_info.compression = None
assert wfile_info.relpath() == (
'0000000000000000/000000000000000000000001')
# mock a server object
server = mock.Mock(name='server')
server.config.wals_directory = '/tmp/wals'
# parse the string
info_file = wfile_info.from_xlogdb_line(
'000000000000000000000001\t42\t43\tNone\n')
assert list(wfile_info.items()) == list(info_file.items())
def test_timezone_aware_parser(self):
"""
Test the timezone_aware_parser method with different string
formats
"""
# test case 1 string with timezone info
tz_string = '2009/05/13 19:19:30 -0400'
result = load_datetime_tz(tz_string)
assert result.tzinfo == tzoffset(None, -14400)
# test case 2 string with timezone info with a different format
tz_string = '2004-04-09T21:39:00-08:00'
result = load_datetime_tz(tz_string)
assert result.tzinfo == tzoffset(None, -28800)
# test case 3 string without timezone info,
# expecting tzlocal() as timezone
tz_string = str(datetime.now())
result = load_datetime_tz(tz_string)
assert result.tzinfo == tzlocal()
# test case 4 string with a wrong timezone format,
# expecting tzlocal() as timezone
tz_string = '16:08:12 05/08/03 AEST'
result = load_datetime_tz(tz_string)
assert result.tzinfo == tzlocal()
# noinspection PyMethodMayBeStatic
class TestBackupInfo(object):
def test_backup_info_from_file(self, tmpdir):
"""
Test the initialization of a BackupInfo object
loading data from a backup.info file
"""
# we want to test the loading of BackupInfo data from a local file.
# So we create a file in the tmpdir containing a
# valid BackupInfo dump
infofile = tmpdir.join("backup.info")
infofile.write(BASE_BACKUP_INFO)
# Mock the server, we don't need it at the moment
server = build_mocked_server()
# load the data from the backup.info file
b_info = BackupInfo(server, info_file=infofile.strpath)
assert b_info
assert b_info.begin_offset == 40
assert b_info.begin_wal == '000000010000000000000004'
assert b_info.timeline == 1
assert isinstance(b_info.tablespaces, list)
assert b_info.tablespaces[0].name == 'fake_tbs'
assert b_info.tablespaces[0].oid == 16384
assert b_info.tablespaces[0].location == '/fake_tmp/tbs'
def test_backup_info_from_empty_file(self, tmpdir):
"""
Test the initialization of a BackupInfo object
loading data from a backup.info file
"""
# we want to test the loading of BackupInfo data from local file.
# So we create a file into the tmpdir containing a
# valid BackupInfo dump
infofile = tmpdir.join("backup.info")
infofile.write('')
# Mock the server, we don't need it at the moment
server = build_mocked_server(name='test_server')
server.backup_manager.name = 'test_mode'
# load the data from the backup.info file
b_info = BackupInfo(server, info_file=infofile.strpath)
assert b_info
assert b_info.server_name == 'test_server'
assert b_info.mode == 'test_mode'
def test_backup_info_from_backup_id(self, tmpdir):
"""
Test the initialization of a BackupInfo object
using a backup_id as argument
"""
# We want to test the loading system using a backup_id.
# So we create a backup.info file into the tmpdir then
# we instruct the configuration on the position of the
# testing backup.info file
server = build_mocked_server(
main_conf={
'basebackups_directory': tmpdir.strpath
},
)
infofile = tmpdir.mkdir('fake_name').join('backup.info')
infofile.write(BASE_BACKUP_INFO)
# Load the backup.info file using the backup_id
b_info = BackupInfo(server, backup_id="fake_name")
assert b_info
assert b_info.begin_offset == 40
assert b_info.begin_wal == '000000010000000000000004'
assert b_info.timeline == 1
assert isinstance(b_info.tablespaces, list)
assert b_info.tablespaces[0].name == 'fake_tbs'
assert b_info.tablespaces[0].oid == 16384
assert b_info.tablespaces[0].location == '/fake_tmp/tbs'
def test_backup_info_save(self, tmpdir):
"""
Test the save method of a BackupInfo object
"""
# Check the saving method.
# Load a backup.info file, modify the BackupInfo object
# then save it.
server = build_mocked_server(
main_conf={
'basebackups_directory': tmpdir.strpath
},
)
backup_dir = tmpdir.mkdir('fake_name')
infofile = backup_dir.join('backup.info')
b_info = BackupInfo(server, backup_id="fake_name")
b_info.status = BackupInfo.FAILED
b_info.save()
# read the file looking for the modified line
for line in infofile.readlines():
if line.startswith("status"):
assert line.strip() == "status=FAILED"
def test_backup_info_version(self, tmpdir):
"""
Simple test for backup_version management.
"""
server = build_mocked_server(
main_conf={
'basebackups_directory': tmpdir.strpath
},
)
# new version
backup_dir = tmpdir.mkdir('fake_backup_id')
backup_dir.mkdir('data')
backup_dir.join('backup.info')
b_info = BackupInfo(server, backup_id="fake_backup_id")
assert b_info.backup_version == 2
# old version
backup_dir = tmpdir.mkdir('another_fake_backup_id')
backup_dir.mkdir('pgdata')
backup_dir.join('backup.info')
b_info = BackupInfo(server, backup_id="another_fake_backup_id")
assert b_info.backup_version == 1
def test_data_dir(self, tmpdir):
"""
Simple test for the method that is responsible of the build of the
path to the datadir and to the tablespaces dir according
with backup_version
"""
server = build_mocked_server(
main_conf={
'basebackups_directory': tmpdir.strpath
},
)
# Build a fake v2 backup
backup_dir = tmpdir.mkdir('fake_backup_id')
data_dir = backup_dir.mkdir('data')
info_file = backup_dir.join('backup.info')
info_file.write(BASE_BACKUP_INFO)
b_info = BackupInfo(server, backup_id="fake_backup_id")
# Check that the paths are built according with version
assert b_info.backup_version == 2
assert b_info.get_data_directory() == data_dir.strpath
assert b_info.get_data_directory(16384) == (backup_dir.strpath +
'/16384')
# Build a fake v1 backup
backup_dir = tmpdir.mkdir('another_fake_backup_id')
pgdata_dir = backup_dir.mkdir('pgdata')
info_file = backup_dir.join('backup.info')
info_file.write(BASE_BACKUP_INFO)
b_info = BackupInfo(server, backup_id="another_fake_backup_id")
# Check that the paths are built according with version
assert b_info.backup_version == 1
assert b_info.get_data_directory(16384) == \
backup_dir.strpath + '/pgdata/pg_tblspc/16384'
assert b_info.get_data_directory() == pgdata_dir.strpath
# Check that an exception is raised if an invalid oid
# is provided to the method
with pytest.raises(ValueError):
b_info.get_data_directory(12345)
# Check that a ValueError exception is raised with an
# invalid oid when the tablespaces list is None
b_info.tablespaces = None
# and expect a value error
with pytest.raises(ValueError):
b_info.get_data_directory(16384)
def test_to_json(self, tmpdir):
server = build_mocked_server(
main_conf={
'basebackups_directory': tmpdir.strpath
},
)
# Build a fake backup
backup_dir = tmpdir.mkdir('fake_backup_id')
info_file = backup_dir.join('backup.info')
info_file.write(BASE_BACKUP_INFO)
b_info = BackupInfo(server, backup_id="fake_backup_id")<|fim▁hole|> assert json.dumps(b_info.to_json())
def test_from_json(self, tmpdir):
server = build_mocked_server(
main_conf={
'basebackups_directory': tmpdir.strpath
},
)
# Build a fake backup
backup_dir = tmpdir.mkdir('fake_backup_id')
info_file = backup_dir.join('backup.info')
info_file.write(BASE_BACKUP_INFO)
b_info = BackupInfo(server, backup_id="fake_backup_id")
# Build another BackupInfo from the json dump
new_binfo = BackupInfo.from_json(server, b_info.to_json())
assert b_info.to_dict() == new_binfo.to_dict()<|fim▁end|>
|
# This call should not raise
|
<|file_name|>debug.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
debug.py - Functions to aid in debugging
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
from __future__ import print_function
import sys, traceback, time, gc, re, types, weakref, inspect, os, cProfile, pstats, threading
from . import ptime
from numpy import ndarray
from .Qt import QtCore, QtGui
from .util.mutex import Mutex
from .util import cprint
__ftraceDepth = 0
def ftrace(func):
"""Decorator used for marking the beginning and end of function calls.
Automatically indents nested calls.
"""
def w(*args, **kargs):
global __ftraceDepth
pfx = " " * __ftraceDepth
print(pfx + func.__name__ + " start")
__ftraceDepth += 1
try:
rv = func(*args, **kargs)
finally:
__ftraceDepth -= 1
print(pfx + func.__name__ + " done")
return rv
return w
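# Example (the traced function is hypothetical):
#
# @ftrace
# def load(path):
#     ...
#
# prints "load start" before and "load done" after each call, with nested
# traced calls indented one level deeper.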
class Tracer(object):
"""
Prints every function enter/exit. Useful for debugging crashes / lockups.
"""
def __init__(self):
self.count = 0
self.stack = []
def trace(self, frame, event, arg):
self.count += 1
# If it has been a long time since we saw the top of the stack,
# print a reminder
if self.count % 1000 == 0:
print("----- current stack: -----")
for line in self.stack:
print(line)
if event == 'call':
line = " " * len(self.stack) + ">> " + self.frameInfo(frame)
print(line)
self.stack.append(line)
elif event == 'return':
self.stack.pop()
line = " " * len(self.stack) + "<< " + self.frameInfo(frame)
print(line)
if len(self.stack) == 0:
self.count = 0
return self.trace
def stop(self):
sys.settrace(None)
def start(self):
sys.settrace(self.trace)
def frameInfo(self, fr):
filename = fr.f_code.co_filename
funcname = fr.f_code.co_name
lineno = fr.f_lineno
callfr = sys._getframe(3)
callline = "%s %d" % (callfr.f_code.co_name, callfr.f_lineno)
args, _, _, value_dict = inspect.getargvalues(fr)
if len(args) and args[0] == 'self':
instance = value_dict.get('self', None)
if instance is not None:
cls = getattr(instance, '__class__', None)
if cls is not None:
funcname = cls.__name__ + "." + funcname
return "%s: %s %s: %s" % (callline, filename, lineno, funcname)
def warnOnException(func):
"""Decorator that catches/ignores exceptions and prints a stack trace."""
def w(*args, **kwds):
try:
func(*args, **kwds)
except:
printExc('Ignored exception:')
return w
def getExc(indent=4, prefix='| ', skip=1):
lines = formatException(*sys.exc_info(), skip=skip)
lines2 = []
for l in lines:
lines2.extend(l.strip('\n').split('\n'))
lines3 = [" "*indent + prefix + l for l in lines2]
return '\n'.join(lines3)
def printExc(msg='', indent=4, prefix='|'):
"""Print an error message followed by an indented exception backtrace
(This function is intended to be called within except: blocks)"""
exc = getExc(indent, prefix + ' ', skip=2)
print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
print(" "*indent + prefix + '='*30 + '>>')
print(exc)
print(" "*indent + prefix + '='*30 + '<<')
def printTrace(msg='', indent=4, prefix='|'):
"""Print an error message followed by an indented stack trace"""
trace = backtrace(1)
#exc = getExc(indent, prefix + ' ')
print("[%s] %s\n" % (time.strftime("%H:%M:%S"), msg))
print(" "*indent + prefix + '='*30 + '>>')
for line in trace.split('\n'):
print(" "*indent + prefix + " " + line)
print(" "*indent + prefix + '='*30 + '<<')
def backtrace(skip=0):
return ''.join(traceback.format_stack()[:-(skip+1)])
def formatException(exctype, value, tb, skip=0):
"""Return a list of formatted exception strings.
Similar to traceback.format_exception, but displays the entire stack trace
rather than just the portion downstream of the point where the exception is
caught. In particular, unhandled exceptions that occur during Qt signal
handling do not usually show the portion of the stack that emitted the
signal.
"""
lines = traceback.format_exception(exctype, value, tb)
lines = [lines[0]] + traceback.format_stack()[:-(skip+1)] + [' --- exception caught here ---\n'] + lines[1:]
return lines
def printException(exctype, value, traceback):
"""Print an exception with its full traceback.
Set `sys.excepthook = printException` to ensure that exceptions caught
inside Qt signal handlers are printed with their full stack trace.
"""
print(''.join(formatException(exctype, value, traceback, skip=1)))
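# To install it globally (as the docstring above suggests):
#
# sys.excepthook = printException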
def listObjs(regex='Q', typ=None):
"""List all objects managed by python gc with class name matching regex.
Finds 'Q...' classes by default."""
if typ is not None:
return [x for x in gc.get_objects() if isinstance(x, typ)]<|fim▁hole|>
def findRefPath(startObj, endObj, maxLen=8, restart=True, seen={}, path=None, ignore=None):
"""Determine all paths of object references from startObj to endObj"""
refs = []
if path is None:
path = [endObj]
if ignore is None:
ignore = {}
ignore[id(sys._getframe())] = None
ignore[id(path)] = None
ignore[id(seen)] = None
prefix = " "*(8-maxLen)
#print prefix + str(map(type, path))
prefix += " "
if restart:
#gc.collect()
seen.clear()
gc.collect()
newRefs = [r for r in gc.get_referrers(endObj) if id(r) not in ignore]
ignore[id(newRefs)] = None
#fo = allFrameObjs()
#newRefs = []
#for r in gc.get_referrers(endObj):
#try:
#if r not in fo:
#newRefs.append(r)
#except:
#newRefs.append(r)
for r in newRefs:
#print prefix+"->"+str(type(r))
if type(r).__name__ in ['frame', 'function', 'listiterator']:
#print prefix+" FRAME"
continue
try:
if any([r is x for x in path]):
#print prefix+" LOOP", objChainString([r]+path)
continue
except:
print(r)
print(path)
raise
if r is startObj:
refs.append([r])
print(refPathString([startObj]+path))
continue
if maxLen == 0:
#print prefix+" END:", objChainString([r]+path)
continue
## See if we have already searched this node.
## If not, recurse.
tree = None
try:
cache = seen[id(r)]
if cache[0] >= maxLen:
tree = cache[1]
for p in tree:
print(refPathString(p+path))
except KeyError:
pass
ignore[id(tree)] = None
if tree is None:
tree = findRefPath(startObj, r, maxLen-1, restart=False, path=[r]+path, ignore=ignore)
seen[id(r)] = [maxLen, tree]
## integrate any returned results
if len(tree) == 0:
#print prefix+" EMPTY TREE"
continue
else:
for p in tree:
refs.append(p+[r])
#seen[id(r)] = [maxLen, refs]
return refs
def objString(obj):
"""Return a short but descriptive string for any object"""
try:
if type(obj) in [int, float]:
return str(obj)
elif isinstance(obj, dict):
if len(obj) > 5:
return "<dict {%s,...}>" % (",".join(list(obj.keys())[:5]))
else:
return "<dict {%s}>" % (",".join(list(obj.keys())))
elif isinstance(obj, str):
if len(obj) > 50:
return '"%s..."' % obj[:50]
else:
return obj[:]
elif isinstance(obj, ndarray):
return "<ndarray %s %s>" % (str(obj.dtype), str(obj.shape))
elif hasattr(obj, '__len__'):
if len(obj) > 5:
return "<%s [%s,...]>" % (type(obj).__name__, ",".join([type(o).__name__ for o in obj[:5]]))
else:
return "<%s [%s]>" % (type(obj).__name__, ",".join([type(o).__name__ for o in obj]))
else:
return "<%s %s>" % (type(obj).__name__, obj.__class__.__name__)
except:
return str(type(obj))
def refPathString(chain):
"""Given a list of adjacent objects in a reference path, print the 'natural' path
names (ie, attribute names, keys, and indexes) that follow from one object to the next ."""
s = objString(chain[0])
i = 0
while i < len(chain)-1:
#print " -> ", i
i += 1
o1 = chain[i-1]
o2 = chain[i]
cont = False
if isinstance(o1, list) or isinstance(o1, tuple):
if any([o2 is x for x in o1]):
s += "[%d]" % o1.index(o2)
continue
#print " not list"
if isinstance(o2, dict) and hasattr(o1, '__dict__') and o2 == o1.__dict__:
i += 1
if i >= len(chain):
s += ".__dict__"
continue
o3 = chain[i]
for k in o2:
if o2[k] is o3:
s += '.%s' % k
cont = True
continue
#print " not __dict__"
if isinstance(o1, dict):
try:
if o2 in o1:
s += "[key:%s]" % objString(o2)
continue
except TypeError:
pass
for k in o1:
if o1[k] is o2:
s += "[%s]" % objString(k)
cont = True
continue
#print " not dict"
#for k in dir(o1): ## Not safe to request attributes like this.
#if getattr(o1, k) is o2:
#s += ".%s" % k
#cont = True
#continue
#print " not attr"
if cont:
continue
s += " ? "
sys.stdout.flush()
return s
def objectSize(obj, ignore=None, verbose=False, depth=0, recursive=False):
"""Guess how much memory an object is using"""
ignoreTypes = ['MethodType', 'UnboundMethodType', 'BuiltinMethodType', 'FunctionType', 'BuiltinFunctionType']
ignoreTypes = [getattr(types, key) for key in ignoreTypes if hasattr(types, key)]
ignoreRegex = re.compile('(method-wrapper|Flag|ItemChange|Option|Mode)')
if ignore is None:
ignore = {}
indent = ' '*depth
try:
hash(obj)
hsh = obj
except:
hsh = "%s:%d" % (str(type(obj)), id(obj))
if hsh in ignore:
return 0
ignore[hsh] = 1
try:
size = sys.getsizeof(obj)
except TypeError:
size = 0
if isinstance(obj, ndarray):
try:
size += len(obj.data)
except:
pass
if recursive:
if type(obj) in [list, tuple]:
if verbose:
print(indent+"list:")
for o in obj:
s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
if verbose:
print(indent+' +', s)
size += s
elif isinstance(obj, dict):
if verbose:
print(indent+"list:")
for k in obj:
s = objectSize(obj[k], ignore=ignore, verbose=verbose, depth=depth+1)
if verbose:
print(indent+' +', k, s)
size += s
#elif isinstance(obj, QtCore.QObject):
#try:
#childs = obj.children()
#if verbose:
#print indent+"Qt children:"
#for ch in childs:
#s = objectSize(obj, ignore=ignore, verbose=verbose, depth=depth+1)
#size += s
#if verbose:
#print indent + ' +', ch.objectName(), s
#except:
#pass
#if isinstance(obj, types.InstanceType):
gc.collect()
if verbose:
print(indent+'attrs:')
for k in dir(obj):
if k in ['__dict__']:
continue
o = getattr(obj, k)
if type(o) in ignoreTypes:
continue
strtyp = str(type(o))
if ignoreRegex.search(strtyp):
continue
#if isinstance(o, types.ObjectType) and strtyp == "<type 'method-wrapper'>":
#continue
#if verbose:
#print indent, k, '?'
refs = [r for r in gc.get_referrers(o) if type(r) != types.FrameType]
if len(refs) == 1:
s = objectSize(o, ignore=ignore, verbose=verbose, depth=depth+1)
size += s
if verbose:
print(indent + " +", k, s)
#else:
#if verbose:
#print indent + ' -', k, len(refs)
return size
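# Example (the reported size is a rough, platform-dependent estimate):
#
# nbytes = objectSize({'a': [1, 2, 3]}, recursive=True, verbose=True)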
class GarbageWatcher(object):
"""
Convenient dictionary for holding weak references to objects.
Mainly used to check whether the objects have been collected yet or not.
Example:
gw = GarbageWatcher()
gw['objName'] = obj
gw['objName2'] = obj2
gw.check()
"""
def __init__(self):
self.objs = weakref.WeakValueDictionary()
self.allNames = []
def add(self, obj, name):
self.objs[name] = obj
self.allNames.append(name)
def __setitem__(self, name, obj):
self.add(obj, name)
def check(self):
"""Print a list of all watched objects and whether they have been collected."""
gc.collect()
dead = self.allNames[:]
alive = []
for k in self.objs:
dead.remove(k)
alive.append(k)
print("Deleted objects:", dead)
print("Live objects:", alive)
def __getitem__(self, item):
return self.objs[item]
class Profiler(object):
"""Simple profiler allowing measurement of multiple time intervals.
By default, profilers are disabled. To enable profiling, set the
environment variable `PYQTGRAPHPROFILE` to a comma-separated list of
fully-qualified names of profiled functions.
Calling a profiler registers a message (defaulting to an increasing
counter) that contains the time elapsed since the last call. When the
profiler is about to be garbage-collected, the messages are passed to the
outer profiler if one is running, or printed to stdout otherwise.
If `delayed` is set to False, messages are immediately printed instead.
Example:
def function(...):
profiler = Profiler()
... do stuff ...
profiler('did stuff')
... do other stuff ...
profiler('did other stuff')
# profiler is garbage-collected and flushed at function end
If this function is a method of class C, setting `PYQTGRAPHPROFILE` to
"C.function" (without the module name) will enable this profiler.
For regular functions, use the qualified name of the function, stripping
only the initial "pyqtgraph." prefix from the module.
"""
_profilers = os.environ.get("PYQTGRAPHPROFILE", None)
_profilers = _profilers.split(",") if _profilers is not None else []
_depth = 0
_msgs = []
disable = False # set this flag to disable all or individual profilers at runtime
class DisabledProfiler(object):
def __init__(self, *args, **kwds):
pass
def __call__(self, *args):
pass
def finish(self):
pass
def mark(self, msg=None):
pass
_disabledProfiler = DisabledProfiler()
def __new__(cls, msg=None, disabled='env', delayed=True):
"""Optionally create a new profiler based on caller's qualname.
"""
if disabled is True or (disabled == 'env' and len(cls._profilers) == 0):
return cls._disabledProfiler
# determine the qualified name of the caller function
caller_frame = sys._getframe(1)
try:
caller_object_type = type(caller_frame.f_locals["self"])
except KeyError: # we are in a regular function
qualifier = caller_frame.f_globals["__name__"].split(".", 1)[-1]
else: # we are in a method
qualifier = caller_object_type.__name__
func_qualname = qualifier + "." + caller_frame.f_code.co_name
if disabled == 'env' and func_qualname not in cls._profilers: # don't do anything
return cls._disabledProfiler
# create an actual profiling object
cls._depth += 1
obj = super(Profiler, cls).__new__(cls)
obj._name = msg or func_qualname
obj._delayed = delayed
obj._markCount = 0
obj._finished = False
obj._firstTime = obj._lastTime = ptime.time()
obj._newMsg("> Entering " + obj._name)
return obj
def __call__(self, msg=None):
"""Register or print a new message with timing information.
"""
if self.disable:
return
if msg is None:
msg = str(self._markCount)
self._markCount += 1
newTime = ptime.time()
self._newMsg(" %s: %0.4f ms",
msg, (newTime - self._lastTime) * 1000)
self._lastTime = newTime
def mark(self, msg=None):
self(msg)
def _newMsg(self, msg, *args):
msg = " " * (self._depth - 1) + msg
if self._delayed:
self._msgs.append((msg, args))
else:
self.flush()
print(msg % args)
def __del__(self):
self.finish()
def finish(self, msg=None):
"""Add a final message; flush the message list if no parent profiler.
"""
if self._finished or self.disable:
return
self._finished = True
if msg is not None:
self(msg)
self._newMsg("< Exiting %s, total time: %0.4f ms",
self._name, (ptime.time() - self._firstTime) * 1000)
type(self)._depth -= 1
if self._depth < 1:
self.flush()
def flush(self):
if self._msgs:
print("\n".join([m[0]%m[1] for m in self._msgs]))
type(self)._msgs = []
def profile(code, name='profile_run', sort='cumulative', num=30):
"""Common-use for cProfile"""
cProfile.run(code, name)
stats = pstats.Stats(name)
stats.sort_stats(sort)
stats.print_stats(num)
return stats
#### Code for listing (nearly) all objects in the known universe
#### http://utcc.utoronto.ca/~cks/space/blog/python/GetAllObjects
# Recursively expand slist's objects
# into olist, using seen to track
# already processed objects.
def _getr(slist, olist, first=True):
i = 0
for e in slist:
oid = id(e)
typ = type(e)
if oid in olist or typ is int: ## or e in olist: ## since we're excluding all ints, there is no longer a need to check for olist keys
continue
olist[oid] = e
if first and (i%1000) == 0:
gc.collect()
tl = gc.get_referents(e)
if tl:
_getr(tl, olist, first=False)
i += 1
# The public function.
def get_all_objects():
"""Return a list of all live Python objects (excluding int and long), not including the list itself."""
gc.collect()
gcl = gc.get_objects()
olist = {}
_getr(gcl, olist)
del olist[id(olist)]
del olist[id(gcl)]
del olist[id(sys._getframe())]
return olist
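# Example (expensive -- this walks the entire heap):
#
# objs = get_all_objects()
# print(len(objs), "live objects")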
def lookup(oid, objects=None):
"""Return an object given its ID, if it exists."""
if objects is None:
objects = get_all_objects()
return objects[oid]
class ObjTracker(object):
"""
Tracks all objects under the sun, reporting the changes between snapshots: what objects are created, deleted, and persistent.
This class is very useful for tracking memory leaks. The class goes to great (but not heroic) lengths to avoid tracking
its own internal objects.
Example:
ot = ObjTracker() # takes snapshot of currently existing objects
... do stuff ...
ot.diff() # prints lists of objects created and deleted since ot was initialized
... do stuff ...
ot.diff() # prints lists of objects created and deleted since last call to ot.diff()
# also prints list of items that were created since initialization AND have not been deleted yet
# (if done correctly, this list can tell you about objects that were leaked)
arrays = ot.findPersistent('ndarray') ## returns all objects matching 'ndarray' (string match, not instance checking)
## that were considered persistent when the last diff() was run
describeObj(arrays[0]) ## See if we can determine who has references to this array
"""
allObjs = {} ## keep track of all objects created and stored within class instances
allObjs[id(allObjs)] = None
def __init__(self):
self.startRefs = {} ## list of objects that exist when the tracker is initialized {oid: weakref}
## (If it is not possible to weakref the object, then the value is None)
self.startCount = {}
self.newRefs = {} ## list of objects that have been created since initialization
self.persistentRefs = {} ## list of objects considered 'persistent' when the last diff() was called
self.objTypes = {}
ObjTracker.allObjs[id(self)] = None
self.objs = [self.__dict__, self.startRefs, self.startCount, self.newRefs, self.persistentRefs, self.objTypes]
self.objs.append(self.objs)
for v in self.objs:
ObjTracker.allObjs[id(v)] = None
self.start()
def findNew(self, regex):
"""Return all objects matching regex that were considered 'new' when the last diff() was run."""
return self.findTypes(self.newRefs, regex)
def findPersistent(self, regex):
"""Return all objects matching regex that were considered 'persistent' when the last diff() was run."""
return self.findTypes(self.persistentRefs, regex)
def start(self):
"""
Remember the current set of objects as the comparison for all future calls to diff()
Called automatically on init, but can be called manually as well.
"""
refs, count, objs = self.collect()
for r in self.startRefs:
self.forgetRef(self.startRefs[r])
self.startRefs.clear()
self.startRefs.update(refs)
for r in refs:
self.rememberRef(r)
self.startCount.clear()
self.startCount.update(count)
#self.newRefs.clear()
#self.newRefs.update(refs)
def diff(self, **kargs):
"""
Compute all differences between the current object set and the reference set.
Print a set of reports for created, deleted, and persistent objects
"""
refs, count, objs = self.collect() ## refs contains the list of ALL objects
## Which refs have disappeared since call to start() (these are only displayed once, then forgotten.)
delRefs = {}
for i in list(self.startRefs.keys()):
if i not in refs:
delRefs[i] = self.startRefs[i]
del self.startRefs[i]
self.forgetRef(delRefs[i])
for i in list(self.newRefs.keys()):
if i not in refs:
delRefs[i] = self.newRefs[i]
del self.newRefs[i]
self.forgetRef(delRefs[i])
#print "deleted:", len(delRefs)
## Which refs have appeared since call to start() or diff()
persistentRefs = {} ## created since start(), but before last diff()
createRefs = {} ## created since last diff()
for o in refs:
if o not in self.startRefs:
if o not in self.newRefs:
createRefs[o] = refs[o] ## object has been created since last diff()
else:
persistentRefs[o] = refs[o] ## object has been created since start(), but before last diff() (persistent)
#print "new:", len(newRefs)
## self.newRefs holds the entire set of objects created since start()
for r in self.newRefs:
self.forgetRef(self.newRefs[r])
self.newRefs.clear()
self.newRefs.update(persistentRefs)
self.newRefs.update(createRefs)
for r in self.newRefs:
self.rememberRef(self.newRefs[r])
#print "created:", len(createRefs)
## self.persistentRefs holds all objects considered persistent.
self.persistentRefs.clear()
self.persistentRefs.update(persistentRefs)
print("----------- Count changes since start: ----------")
c1 = count.copy()
for k in self.startCount:
c1[k] = c1.get(k, 0) - self.startCount[k]
typs = list(c1.keys())
typs.sort(key=lambda a: c1[a])
for t in typs:
if c1[t] == 0:
continue
num = "%d" % c1[t]
print(" " + num + " "*(10-len(num)) + str(t))
print("----------- %d Deleted since last diff: ------------" % len(delRefs))
self.report(delRefs, objs, **kargs)
print("----------- %d Created since last diff: ------------" % len(createRefs))
self.report(createRefs, objs, **kargs)
print("----------- %d Created since start (persistent): ------------" % len(persistentRefs))
self.report(persistentRefs, objs, **kargs)
def __del__(self):
self.startRefs.clear()
self.startCount.clear()
self.newRefs.clear()
self.persistentRefs.clear()
del ObjTracker.allObjs[id(self)]
for v in self.objs:
del ObjTracker.allObjs[id(v)]
@classmethod
def isObjVar(cls, o):
return type(o) is cls or id(o) in cls.allObjs
def collect(self):
print("Collecting list of all objects...")
gc.collect()
objs = get_all_objects()
frame = sys._getframe()
del objs[id(frame)] ## ignore the current frame
del objs[id(frame.f_code)]
ignoreTypes = [int]
refs = {}
count = {}
for k in objs:
o = objs[k]
typ = type(o)
oid = id(o)
if ObjTracker.isObjVar(o) or typ in ignoreTypes:
continue
try:
ref = weakref.ref(o)
except:
ref = None
refs[oid] = ref
typ = type(o)
typStr = typeStr(o)
self.objTypes[oid] = typStr
ObjTracker.allObjs[id(typStr)] = None
count[typ] = count.get(typ, 0) + 1
print("All objects: %d Tracked objects: %d" % (len(objs), len(refs)))
return refs, count, objs
def forgetRef(self, ref):
if ref is not None:
del ObjTracker.allObjs[id(ref)]
def rememberRef(self, ref):
## Record the address of the weakref object so it is not included in future object counts.
if ref is not None:
ObjTracker.allObjs[id(ref)] = None
def lookup(self, oid, ref, objs=None):
if ref is None or ref() is None:
try:
obj = lookup(oid, objects=objs)
except:
obj = None
else:
obj = ref()
return obj
def report(self, refs, allobjs=None, showIDs=False):
if allobjs is None:
allobjs = get_all_objects()
count = {}
rev = {}
for oid in refs:
obj = self.lookup(oid, refs[oid], allobjs)
if obj is None:
typ = "[del] " + self.objTypes[oid]
else:
typ = typeStr(obj)
if typ not in rev:
rev[typ] = []
rev[typ].append(oid)
c = count.get(typ, [0,0])
count[typ] = [c[0]+1, c[1]+objectSize(obj)]
typs = list(count.keys())
typs.sort(key=lambda a: count[a][1])
for t in typs:
line = " %d\t%d\t%s" % (count[t][0], count[t][1], t)
if showIDs:
line += "\t"+",".join(map(str,rev[t]))
print(line)
def findTypes(self, refs, regex):
allObjs = get_all_objects()
ids = {}
objs = []
r = re.compile(regex)
for k in refs:
if r.search(self.objTypes[k]):
objs.append(self.lookup(k, refs[k], allObjs))
return objs
def describeObj(obj, depth=4, path=None, ignore=None):
"""
Trace all reference paths backward, printing a list of different ways this object can be accessed.
Attempts to answer the question "who has a reference to this object"
"""
if path is None:
path = [obj]
if ignore is None:
ignore = {} ## holds IDs of objects used within the function.
ignore[id(sys._getframe())] = None
ignore[id(path)] = None
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
printed=False
for ref in refs:
if id(ref) in ignore:
continue
if id(ref) in list(map(id, path)):
print("Cyclic reference: " + refPathString([ref]+path))
printed = True
continue
newPath = [ref]+path
if len(newPath) >= depth:
refStr = refPathString(newPath)
if '[_]' not in refStr: ## ignore '_' references generated by the interactive shell
print(refStr)
printed = True
else:
describeObj(ref, depth, newPath, ignore)
printed = True
if not printed:
print("Dead end: " + refPathString(path))
def typeStr(obj):
"""Create a more useful type string by making <instance> types report their class."""
typ = type(obj)
if typ == getattr(types, 'InstanceType', None):
return "<instance of %s>" % obj.__class__.__name__
else:
return str(typ)
def searchRefs(obj, *args):
"""Pseudo-interactive function for tracing references backward.
**Arguments:**
obj: The initial object from which to start searching
args: A set of string or int arguments.
each integer selects one of obj's referrers to be the new 'obj'
each string indicates an action to take on the current 'obj':
t: print the types of obj's referrers
l: print the lengths of obj's referrers (if they have __len__)
i: print the IDs of obj's referrers
o: print obj
ro: return obj
rr: return list of obj's referrers
Examples::
searchRefs(obj, 't') ## Print types of all objects referring to obj
searchRefs(obj, 't', 0, 't') ## ..then select the first referrer and print the types of its referrers
searchRefs(obj, 't', 0, 't', 'l') ## ..also print lengths of the last set of referrers
searchRefs(obj, 0, 1, 'ro') ## Select index 0 from obj's referrer, then select index 1 from the next set of referrers, then return that object
"""
ignore = {id(sys._getframe()): None}
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
refs = [r for r in refs if id(r) not in ignore]
for a in args:
#fo = allFrameObjs()
#refs = [r for r in refs if r not in fo]
if type(a) is int:
obj = refs[a]
gc.collect()
refs = gc.get_referrers(obj)
ignore[id(refs)] = None
refs = [r for r in refs if id(r) not in ignore]
elif a == 't':
print(list(map(typeStr, refs)))
elif a == 'i':
print(list(map(id, refs)))
elif a == 'l':
def slen(o):
if hasattr(o, '__len__'):
return len(o)
else:
return None
print(list(map(slen, refs)))
elif a == 'o':
print(obj)
elif a == 'ro':
return obj
elif a == 'rr':
return refs
def allFrameObjs():
"""Return list of frame objects in current stack. Useful if you want to ignore these objects in refernece searches"""
f = sys._getframe()
objs = []
while f is not None:
objs.append(f)
objs.append(f.f_code)
#objs.append(f.f_locals)
#objs.append(f.f_globals)
#objs.append(f.f_builtins)
f = f.f_back
return objs
def findObj(regex):
"""Return a list of objects whose typeStr matches regex"""
allObjs = get_all_objects()
objs = []
r = re.compile(regex)
for i in allObjs:
obj = allObjs[i]
if r.search(typeStr(obj)):
objs.append(obj)
return objs
def listRedundantModules():
"""List modules that have been imported more than once via different paths."""
mods = {}
for name, mod in sys.modules.items():
if not hasattr(mod, '__file__'):
continue
mfile = os.path.abspath(mod.__file__)
if mfile[-1] == 'c':
mfile = mfile[:-1]
if mfile in mods:
print("module at %s has 2 names: %s, %s" % (mfile, name, mods[mfile]))
else:
mods[mfile] = name
def walkQObjectTree(obj, counts=None, verbose=False, depth=0):
"""
Walk through a tree of QObjects, doing nothing to them.
The purpose of this function is to find dead objects and generate a crash
immediately rather than stumbling upon them later.
Prints a count of the objects encountered, for fun. (or is it?)
"""
if verbose:
print(" "*depth + typeStr(obj))
report = False
if counts is None:
counts = {}
report = True
typ = str(type(obj))
try:
counts[typ] += 1
except KeyError:
counts[typ] = 1
for child in obj.children():
walkQObjectTree(child, counts, verbose, depth+1)
return counts
QObjCache = {}
def qObjectReport(verbose=False):
"""Generate a report counting all QObjects and their types"""
global QObjCache
count = {}
for obj in findObj('PyQt'):
if isinstance(obj, QtCore.QObject):
oid = id(obj)
if oid not in QObjCache:
QObjCache[oid] = typeStr(obj) + " " + obj.objectName()
try:
QObjCache[oid] += " " + obj.parent().objectName()
QObjCache[oid] += " " + obj.text()
except:
pass
print("check obj", oid, str(QObjCache[oid]))
if obj.parent() is None:
walkQObjectTree(obj, count, verbose)
typs = list(count.keys())
typs.sort()
for t in typs:
print(count[t], "\t", t)
class PrintDetector(object):
"""Find code locations that print to stdout."""
def __init__(self):
self.stdout = sys.stdout
sys.stdout = self
def remove(self):
sys.stdout = self.stdout
def __del__(self):
self.remove()
def write(self, x):
self.stdout.write(x)
traceback.print_stack()
def flush(self):
self.stdout.flush()
def listQThreads():
"""Prints Thread IDs (Qt's, not OS's) for all QThreads."""
thr = findObj('[Tt]hread')
thr = [t for t in thr if isinstance(t, QtCore.QThread)]
import sip
for t in thr:
print("--> ", t)
print(" Qt ID: 0x%x" % sip.unwrapinstance(t))
def pretty(data, indent=''):
"""Format nested dict/list/tuple structures into a more human-readable string
This function is a bit better than pprint for displaying OrderedDicts.
"""
ret = ""
ind2 = indent + " "
if isinstance(data, dict):
ret = indent+"{\n"
for k, v in data.items():
ret += ind2 + repr(k) + ": " + pretty(v, ind2).strip() + "\n"
ret += indent+"}\n"
elif isinstance(data, list) or isinstance(data, tuple):
s = repr(data)
if len(s) < 40:
ret += indent + s
else:
if isinstance(data, list):
d = '[]'
else:
d = '()'
ret = indent+d[0]+"\n"
for i, v in enumerate(data):
ret += ind2 + str(i) + ": " + pretty(v, ind2).strip() + "\n"
ret += indent+d[1]+"\n"
else:
ret += indent + repr(data)
return ret
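# Example:
#
# print(pretty({'a': (1, 2), 'b': {'c': [3, 4]}}))
#
# prints one "key: value" pair per line, with nested containers indented
# one level deeper per nesting depth.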
class ThreadTrace(object):
"""
Used to debug freezing by starting a new thread that reports on the
location of other threads periodically.
"""
def __init__(self, interval=10.0):
self.interval = interval
self.lock = Mutex()
self._stop = False
self.start()
def stop(self):
with self.lock:
self._stop = True
def start(self, interval=None):
if interval is not None:
self.interval = interval
self._stop = False
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
def run(self):
while True:
with self.lock:
if self._stop is True:
return
print("\n============= THREAD FRAMES: ================")
for id, frame in sys._current_frames().items():
if id == threading.current_thread().ident:
continue
# try to determine a thread name
try:
name = threading._active.get(id, None)
except:
name = None
if name is None:
try:
# QThread._names must be manually set by thread creators.
name = QtCore.QThread._names.get(id)
except:
name = None
if name is None:
name = "???"
print("<< thread %d \"%s\" >>" % (id, name))
traceback.print_stack(frame)
print("===============================================\n")
time.sleep(self.interval)
class ThreadColor(object):
"""
Wrapper on stdout/stderr that colors text by the current thread ID.
*stream* must be 'stdout' or 'stderr'.
"""
colors = {}
lock = Mutex()
def __init__(self, stream):
self.stream = getattr(sys, stream)
self.err = stream == 'stderr'
setattr(sys, stream, self)
def write(self, msg):
with self.lock:
cprint.cprint(self.stream, self.color(), msg, -1, stderr=self.err)
def flush(self):
with self.lock:
self.stream.flush()
def color(self):
tid = threading.current_thread()
if tid not in self.colors:
c = (len(self.colors) % 15) + 1
self.colors[tid] = c
return self.colors[tid]
def enableFaulthandler():
""" Enable faulthandler for all threads.
If the faulthandler package is available, this function disables and then
re-enables fault handling for all threads (this is necessary to ensure any
new threads are handled correctly), and returns True.
If faulthandler is not available, then returns False.
"""
try:
import faulthandler
# necessary to disable first or else new threads may not be handled.
faulthandler.disable()
faulthandler.enable(all_threads=True)
return True
except ImportError:
return False<|fim▁end|>
|
else:
return [x for x in gc.get_objects() if re.match(regex, type(x).__name__)]
|
<|file_name|>graph_map.rs<|end_file_name|><|fim▁begin|>use typed_map::TypedMemoryMap;
pub struct GraphMMap {
nodes: TypedMemoryMap<u64>,
edges: TypedMemoryMap<u32>,<|fim▁hole|>}
impl GraphMMap {
#[inline(always)]
pub fn nodes(&self) -> usize { self.nodes[..].len() }
#[inline(always)]
pub fn edges(&self, node: usize) -> &[u32] {
let nodes = &self.nodes[..];
if node + 1 < nodes.len() {
let start = nodes[node] as usize;
let limit = nodes[node+1] as usize;
&self.edges[..][start..limit]
}
else { &[] }
}
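// Example usage (the "data/graph" prefix is hypothetical; it expects
// "data/graph.offsets" and "data/graph.targets" to exist on disk):
//
// let graph = GraphMMap::new("data/graph");
// for node in 0..graph.nodes() {
//     for &dst in graph.edges(node) {
//         // visit edge node -> dst
//     }
// }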
pub fn new(prefix: &str) -> GraphMMap {
GraphMMap {
nodes: TypedMemoryMap::new(format!("{}.offsets", prefix)),
edges: TypedMemoryMap::new(format!("{}.targets", prefix)),
}
}
}<|fim▁end|>
| |
<|file_name|>users.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""Setup the SkyLines application"""
from faker import Faker
from skylines.model import User
def test_admin():
u = User()
u.first_name = u'Example'
u.last_name = u'Manager'
u.email_address = u'[email protected]'
u.password = u.original_password = u'managepass'
u.admin = True
return u
def test_user():
u1 = User()
u1.first_name = u'Example'
u1.last_name = u'User'
u1.email_address = u'[email protected]'
u1.password = u1.original_password = u'test'
u1.tracking_key = 123456
u1.tracking_delay = 2<|fim▁hole|>def test_users(n=50):
fake = Faker(locale='de_DE')
fake.seed(42)
users = []
for i in xrange(n):
u = User()
u.first_name = fake.first_name()
u.last_name = fake.last_name()
u.email_address = fake.email()
u.password = u.original_password = fake.password()
u.tracking_key = fake.random_number(digits=6)
users.append(u)
return users<|fim▁end|>
|
return u1
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>use serde_derive::Serialize;
mod basic;
mod errors;
mod inheritance;
mod macros;
mod square_brackets;
mod whitespace;
#[allow(dead_code)]
#[derive(Debug, Serialize)]
pub struct NestedObject {
pub label: String,
pub parent: Option<Box<NestedObject>>,
pub numbers: Vec<usize>,
}
#[derive(Debug, Serialize)]
pub struct Review {
title: String,
paragraphs: Vec<String>,
}<|fim▁hole|>
impl Review {
#[allow(dead_code)]
pub fn new() -> Review {
Review {
title: "My review".to_owned(),
paragraphs: vec!["A".to_owned(), "B".to_owned(), "C".to_owned()],
}
}
}<|fim▁end|>
| |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>use chrono::{offset::Utc, DateTime};
use diesel::{self, pg::PgConnection};
use crate::{base_post::post::Post, schema::comments};
pub mod reaction;
#[derive(Debug, Identifiable, Queryable, QueryableByName)]
#[table_name = "comments"]
pub struct Comment {
id: i32,
conversation: i32, // foreign key to topic Post
parent: i32, // foreign key to replied Post
post: i32, // foreign key to Post
created_at: DateTime<Utc>,
updated_at: DateTime<Utc>,
}
impl Comment {
pub fn id(&self) -> i32 {
self.id
}
pub fn conversation(&self) -> i32 {
self.conversation
}
pub fn parent(&self) -> i32 {
self.parent
}
pub fn post(&self) -> i32 {
self.post
}
}
#[derive(Insertable)]
#[table_name = "comments"]
pub struct NewComment {
conversation: i32,
parent: i32,
post: i32,
}
impl NewComment {
pub fn insert(self, conn: &PgConnection) -> Result<Comment, diesel::result::Error> {
use diesel::prelude::*;
diesel::insert_into(comments::table)
.values(&self)
.get_result(conn)
}
pub fn new(conversation: &Post, parent: &Post, post: &Post) -> Self {
NewComment {
conversation: conversation.id(),
parent: parent.id(),
post: post.id(),
}
}
}
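// Sketch of intended use (`conn` and the three `Post` values are assumed to
// already exist, e.g. via the helpers used in the tests below):
//
// let comment = NewComment::new(&conversation_post, &parent_post, &comment_post)
//     .insert(&conn)?;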
#[cfg(test)]<|fim▁hole|>mod tests {
use crate::test_helper::*;
#[test]
fn create_comment_on_conversation() {
with_connection(|conn| {
make_post(conn, |conversation_post| {
make_post(conn, |comment_post| {
with_comment(
conn,
&conversation_post,
&conversation_post,
&comment_post,
|_| Ok(()),
)
})
})
})
}
#[test]
fn create_comment_in_thread() {
with_connection(|conn| {
make_post(conn, |conversation_post| {
make_post(conn, |parent_post| {
with_comment(
conn,
&conversation_post,
&conversation_post,
&parent_post,
|_parent_comment| {
make_post(conn, |comment_post| {
with_comment(
conn,
&conversation_post,
&parent_post,
&comment_post,
|_| Ok(()),
)
})
},
)
})
})
})
}
}<|fim▁end|>
| |
<|file_name|>sciond.py<|end_file_name|><|fim▁begin|># Copyright 2014 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`sciond` --- Reference endhost SCION Daemon
================================================
"""
# Stdlib
import logging
import os
import errno
import threading
import time
from itertools import product
# External
from external.expiring_dict import ExpiringDict
# SCION
from lib.app.sciond import get_default_sciond_path
from lib.defines import (
GEN_CACHE_PATH,
PATH_FLAG_SIBRA,
PATH_REQ_TOUT,
SCIOND_API_SOCKDIR,
)
from lib.errors import SCIONBaseError, SCIONParseError, SCIONServiceLookupError
from lib.log import log_exception
from lib.msg_meta import SockOnlyMetadata
from lib.path_seg_meta import PathSegMeta
from lib.packet.ctrl_pld import CtrlPayload, mk_ctrl_req_id
from lib.packet.path import SCIONPath
from lib.packet.path_mgmt.base import PathMgmt
from lib.packet.path_mgmt.rev_info import (
SignedRevInfoCertFetchError,
RevInfoExpiredError,
RevInfoValidationError,
RevocationInfo,
SignedRevInfo,
SignedRevInfoVerificationError
)
from lib.packet.path_mgmt.seg_req import PathSegmentReply, PathSegmentReq
from lib.packet.scion_addr import ISD_AS
from lib.packet.scmp.types import SCMPClass, SCMPPathClass
from lib.path_combinator import build_shortcut_paths, tuples_to_full_paths
from lib.path_db import DBResult, PathSegmentDB
from lib.rev_cache import RevCache
from lib.sciond_api.as_req import SCIONDASInfoReply, SCIONDASInfoReplyEntry, SCIONDASInfoRequest
from lib.sciond_api.revocation import SCIONDRevReply, SCIONDRevReplyStatus
from lib.sciond_api.host_info import HostInfo
from lib.sciond_api.if_req import SCIONDIFInfoReply, SCIONDIFInfoReplyEntry, SCIONDIFInfoRequest
from lib.sciond_api.base import SCIONDMsg
from lib.sciond_api.path_meta import FwdPathMeta, PathInterface
from lib.sciond_api.path_req import (
SCIONDPathRequest,
SCIONDPathReplyError,
SCIONDPathReply,
SCIONDPathReplyEntry,
)
from lib.sciond_api.revocation import SCIONDRevNotification
from lib.sciond_api.segment_req import (
SCIONDSegTypeHopReply,
SCIONDSegTypeHopReplyEntry,
SCIONDSegTypeHopRequest,
)
from lib.sciond_api.service_req import (
SCIONDServiceInfoReply,
SCIONDServiceInfoReplyEntry,
SCIONDServiceInfoRequest,
)
from lib.sibra.ext.resv import ResvBlockSteady
from lib.socket import ReliableSocket
from lib.thread import thread_safety_net
from lib.types import (
CertMgmtType,
PathMgmtType as PMT,
PathSegmentType as PST,
PayloadClass,
LinkType,
SCIONDMsgType as SMT,
ServiceType,
TypeBase,
)
from lib.util import SCIONTime
from sciond.req import RequestState
from scion_elem.scion_elem import SCIONElement
_FLUSH_FLAG = "FLUSH"
class SCIONDaemon(SCIONElement):
"""
The SCION Daemon used for retrieving and combining paths.
"""
MAX_REQS = 1024
# Time a path segment is cached at a host (in seconds).
SEGMENT_TTL = 300
# Empty Path TTL
EMPTY_PATH_TTL = SEGMENT_TTL
def __init__(self, conf_dir, addr, api_addr, run_local_api=False,
port=None, spki_cache_dir=GEN_CACHE_PATH, prom_export=None, delete_sock=False):
"""
Initialize an instance of the class SCIONDaemon.
"""
super().__init__("sciond", conf_dir, spki_cache_dir=spki_cache_dir,
prom_export=prom_export, public=(addr, port))
up_labels = {**self._labels, "type": "up"} if self._labels else None
down_labels = {**self._labels, "type": "down"} if self._labels else None
core_labels = {**self._labels, "type": "core"} if self._labels else None
self.up_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=up_labels)
self.down_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=down_labels)
self.core_segments = PathSegmentDB(segment_ttl=self.SEGMENT_TTL, labels=core_labels)
self.rev_cache = RevCache()
# Keep track of requested paths.
self.requested_paths = ExpiringDict(self.MAX_REQS, PATH_REQ_TOUT)
self.req_path_lock = threading.Lock()
self._api_sock = None
self.daemon_thread = None
os.makedirs(SCIOND_API_SOCKDIR, exist_ok=True)
self.api_addr = (api_addr or get_default_sciond_path())
if delete_sock:
try:
os.remove(self.api_addr)
except OSError as e:
if e.errno != errno.ENOENT:
logging.error("Could not delete socket %s: %s" % (self.api_addr, e))
self.CTRL_PLD_CLASS_MAP = {
PayloadClass.PATH: {
PMT.REPLY: self.handle_path_reply,
PMT.REVOCATION: self.handle_revocation,
},
PayloadClass.CERT: {
CertMgmtType.CERT_CHAIN_REQ: self.process_cert_chain_request,
CertMgmtType.CERT_CHAIN_REPLY: self.process_cert_chain_reply,
CertMgmtType.TRC_REPLY: self.process_trc_reply,
CertMgmtType.TRC_REQ: self.process_trc_request,
},
}
self.SCMP_PLD_CLASS_MAP = {
SCMPClass.PATH:
{SCMPPathClass.REVOKED_IF: self.handle_scmp_revocation},
}
if run_local_api:
self._api_sock = ReliableSocket(bind_unix=(self.api_addr, "sciond"))
self._socks.add(self._api_sock, self.handle_accept)
@classmethod
def start(cls, conf_dir, addr, api_addr=None, run_local_api=False, port=0):
"""
Initializes and starts a SCIOND instance.
"""
inst = cls(conf_dir, addr, api_addr, run_local_api, port)
name = "SCIONDaemon.run %s" % inst.addr.isd_as
inst.daemon_thread = threading.Thread(
target=thread_safety_net, args=(inst.run,), name=name, daemon=True)
inst.daemon_thread.start()
logging.debug("sciond started with api_addr = %s", inst.api_addr)
def _get_msg_meta(self, packet, addr, sock):
if sock != self._udp_sock:
return packet, SockOnlyMetadata.from_values(sock) # API socket
else:
return super()._get_msg_meta(packet, addr, sock)
def handle_msg_meta(self, msg, meta):
"""
Main routine to handle incoming SCION messages.
"""
if isinstance(meta, SockOnlyMetadata): # From SCIOND API
try:
sciond_msg = SCIONDMsg.from_raw(msg)
except SCIONParseError as err:
logging.error(str(err))
return
self.api_handle_request(sciond_msg, meta)
return
super().handle_msg_meta(msg, meta)
def handle_path_reply(self, cpld, meta):
"""
Handle path reply from local path server.
"""
pmgt = cpld.union
path_reply = pmgt.union
assert isinstance(path_reply, PathSegmentReply), type(path_reply)
recs = path_reply.recs()
for srev_info in recs.iter_srev_infos():
self.check_revocation(srev_info, lambda x: self.continue_revocation_processing(
srev_info) if not x else False, meta)
req = path_reply.req()
key = req.dst_ia(), req.flags()
with self.req_path_lock:
r = self.requested_paths.get(key)
if r:
r.notify_reply(path_reply)
else:
logging.warning("No outstanding request found for %s", key)
for type_, pcb in recs.iter_pcbs():
seg_meta = PathSegMeta(pcb, self.continue_seg_processing,
meta, type_, params=(r,))
self._process_path_seg(seg_meta, cpld.req_id)
def continue_revocation_processing(self, srev_info):
self.rev_cache.add(srev_info)
self.remove_revoked_segments(srev_info.rev_info())
def continue_seg_processing(self, seg_meta):
"""
        Called for every path segment (that can be verified) received from
        the path server, to continue processing of that segment.
        The segment is added to the path DB and pending requests are checked.
"""
pcb = seg_meta.seg
type_ = seg_meta.type
# Check that segment does not contain a revoked interface.
if not self.check_revoked_interface(pcb, self.rev_cache):
return
map_ = {
PST.UP: self._handle_up_seg,
PST.DOWN: self._handle_down_seg,
PST.CORE: self._handle_core_seg,
}
map_[type_](pcb)
r = seg_meta.params[0]
if r:
r.verified_segment()
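    # The _handle_*_seg helpers below cache a verified segment in the matching
    # path DB and return the IA it connects to, or None if the segment is not
    # relevant here or was already known.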
def _handle_up_seg(self, pcb):
if self.addr.isd_as != pcb.last_ia():
return None
if self.up_segments.update(pcb) == DBResult.ENTRY_ADDED:
logging.debug("Up segment added: %s", pcb.short_desc())
return pcb.first_ia()
return None
def _handle_down_seg(self, pcb):
last_ia = pcb.last_ia()
if self.addr.isd_as == last_ia:
return None
if self.down_segments.update(pcb) == DBResult.ENTRY_ADDED:
logging.debug("Down segment added: %s", pcb.short_desc())
return last_ia
return None
def _handle_core_seg(self, pcb):
if self.core_segments.update(pcb) == DBResult.ENTRY_ADDED:
logging.debug("Core segment added: %s", pcb.short_desc())
return pcb.first_ia()
return None
def api_handle_request(self, msg, meta):
"""
Handle local API's requests.
"""
mtype = msg.type()
if mtype == SMT.PATH_REQUEST:
threading.Thread(
target=thread_safety_net,
args=(self._api_handle_path_request, msg, meta),
daemon=True).start()
elif mtype == SMT.REVOCATION:
self._api_handle_rev_notification(msg, meta)
elif mtype == SMT.AS_REQUEST:
self._api_handle_as_request(msg, meta)
elif mtype == SMT.IF_REQUEST:
self._api_handle_if_request(msg, meta)
elif mtype == SMT.SERVICE_REQUEST:
self._api_handle_service_request(msg, meta)
elif mtype == SMT.SEGTYPEHOP_REQUEST:
self._api_handle_seg_type_request(msg, meta)
else:
logging.warning(
"API: type %s not supported.", TypeBase.to_str(mtype))
def _api_handle_path_request(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDPathRequest), type(request)
req_id = pld.id
dst_ia = request.dst_ia()
src_ia = request.src_ia()
if not src_ia:
src_ia = self.addr.isd_as
thread = threading.current_thread()
thread.name = "SCIONDaemon API id:%s %s -> %s" % (
thread.ident, src_ia, dst_ia)
paths, error = self.get_paths(dst_ia, flush=request.p.flags.refresh)
if request.p.maxPaths:
paths = paths[:request.p.maxPaths]
reply_entries = []
for path_meta in paths:
fwd_if = path_meta.fwd_path().get_fwd_if()
# Set dummy host addr if path is empty.
haddr, port = None, None
if fwd_if:
br = self.ifid2br[fwd_if]
haddr, port = br.int_addrs.public
addrs = [haddr] if haddr else []
first_hop = HostInfo.from_values(addrs, port)
reply_entry = SCIONDPathReplyEntry.from_values(
path_meta, first_hop)
reply_entries.append(reply_entry)
logging.debug("Replying to api request for %s with %d paths:\n%s",
dst_ia, len(paths), "\n".join([p.short_desc() for p in paths]))
self._send_path_reply(req_id, reply_entries, error, meta)
def _send_path_reply(self, req_id, reply_entries, error, meta):
path_reply = SCIONDMsg(SCIONDPathReply.from_values(reply_entries, error), req_id)
self.send_meta(path_reply.pack(), meta)
def _api_handle_as_request(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDASInfoRequest), type(request)
req_ia = request.isd_as()
if not req_ia or req_ia.is_zero() or req_ia == self.addr.isd_as:
# Request is for the local AS.
reply_entry = SCIONDASInfoReplyEntry.from_values(
self.addr.isd_as, self.is_core_as(), self.topology.mtu)
else:
# Request is for a remote AS.
reply_entry = SCIONDASInfoReplyEntry.from_values(req_ia, self.is_core_as(req_ia))
as_reply = SCIONDMsg(SCIONDASInfoReply.from_values([reply_entry]), pld.id)
self.send_meta(as_reply.pack(), meta)
def _api_handle_if_request(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDIFInfoRequest), type(request)
all_brs = request.all_brs()
if_list = []
if not all_brs:
if_list = list(request.iter_ids())
if_entries = []
for if_id, br in self.ifid2br.items():
if all_brs or if_id in if_list:
br_addr, br_port = br.int_addrs.public
info = HostInfo.from_values([br_addr], br_port)
reply_entry = SCIONDIFInfoReplyEntry.from_values(if_id, info)
if_entries.append(reply_entry)
if_reply = SCIONDMsg(SCIONDIFInfoReply.from_values(if_entries), pld.id)
self.send_meta(if_reply.pack(), meta)
def _api_handle_service_request(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDServiceInfoRequest), type(request)
all_svcs = request.all_services()
svc_list = []
if not all_svcs:
svc_list = list(request.iter_service_types())
svc_entries = []
for svc_type in ServiceType.all():
if all_svcs or svc_type in svc_list:
lookup_res = self.dns_query_topo(svc_type)
host_infos = []
for addr, port in lookup_res:
host_infos.append(HostInfo.from_values([addr], port))
reply_entry = SCIONDServiceInfoReplyEntry.from_values(
svc_type, host_infos)
svc_entries.append(reply_entry)
svc_reply = SCIONDMsg(SCIONDServiceInfoReply.from_values(svc_entries), pld.id)
self.send_meta(svc_reply.pack(), meta)
def _api_handle_rev_notification(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDRevNotification), type(request)
self.handle_revocation(CtrlPayload(PathMgmt(request.srev_info())), meta, pld)
def _api_handle_seg_type_request(self, pld, meta):
request = pld.union
assert isinstance(request, SCIONDSegTypeHopRequest), type(request)
segmentType = request.p.type
db = []
if segmentType == PST.CORE:
db = self.core_segments
elif segmentType == PST.UP:
db = self.up_segments
elif segmentType == PST.DOWN:
db = self.down_segments
else:
logging.error("Requesting segment type %s unrecognized.", segmentType)
seg_entries = []
for segment in db(full=True):
if_list = []
for asm in segment.iter_asms():
isd_as = asm.isd_as()
hof = asm.pcbm(0).hof()
egress = hof.egress_if
ingress = hof.ingress_if
if ingress:
if_list.append(PathInterface.from_values(isd_as, ingress))
if egress:
if_list.append(PathInterface.from_values(isd_as, egress))
reply_entry = SCIONDSegTypeHopReplyEntry.from_values(
if_list, segment.get_timestamp(), segment.get_expiration_time())
seg_entries.append(reply_entry)
seg_reply = SCIONDMsg(
SCIONDSegTypeHopReply.from_values(seg_entries), pld.id)
self.send_meta(seg_reply.pack(), meta)
def handle_scmp_revocation(self, pld, meta):
srev_info = SignedRevInfo.from_raw(pld.info.srev_info)
self.handle_revocation(CtrlPayload(PathMgmt(srev_info)), meta)
def handle_revocation(self, cpld, meta, pld=None):
pmgt = cpld.union
srev_info = pmgt.union
rev_info = srev_info.rev_info()
assert isinstance(rev_info, RevocationInfo), type(rev_info)
logging.debug("Received revocation: %s from %s", srev_info.short_desc(), meta)
self.check_revocation(srev_info,
lambda e: self.process_revocation(e, srev_info, meta, pld), meta)
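    # process_revocation maps the outcome of the check onto an API status
    # code; a None error means the revocation is valid, so it is cached and
    # the affected path segments are removed.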
def process_revocation(self, error, srev_info, meta, pld):
rev_info = srev_info.rev_info()
status = None
if error is None:
status = SCIONDRevReplyStatus.VALID
self.rev_cache.add(srev_info)
self.remove_revoked_segments(rev_info)
else:
if type(error) == RevInfoValidationError:
logging.error("Failed to validate RevInfo %s from %s: %s",
srev_info.short_desc(), meta, error)
status = SCIONDRevReplyStatus.INVALID
if type(error) == RevInfoExpiredError:
logging.info("Ignoring expired Revinfo, %s from %s", srev_info.short_desc(), meta)
status = SCIONDRevReplyStatus.STALE
if type(error) == SignedRevInfoCertFetchError:
logging.error("Failed to fetch certificate for SignedRevInfo %s from %s: %s",
srev_info.short_desc(), meta, error)
status = SCIONDRevReplyStatus.UNKNOWN
if type(error) == SignedRevInfoVerificationError:
logging.error("Failed to verify SRevInfo %s from %s: %s",
srev_info.short_desc(), meta, error)
status = SCIONDRevReplyStatus.SIGFAIL
if type(error) == SCIONBaseError:
logging.error("Revocation check failed for %s from %s:\n%s",
srev_info.short_desc(), meta, error)
status = SCIONDRevReplyStatus.UNKNOWN
if pld:
rev_reply = SCIONDMsg(SCIONDRevReply.from_values(status), pld.id)
self.send_meta(rev_reply.pack(), meta)
def remove_revoked_segments(self, rev_info):
# Go through all segment databases and remove affected segments.
removed_up = removed_core = removed_down = 0
if rev_info.p.linkType == LinkType.CORE:
removed_core = self._remove_revoked_pcbs(self.core_segments, rev_info)
elif rev_info.p.linkType in [LinkType.PARENT, LinkType.CHILD]:
removed_up = self._remove_revoked_pcbs(self.up_segments, rev_info)
removed_down = self._remove_revoked_pcbs(self.down_segments, rev_info)
elif rev_info.p.linkType != LinkType.PEER:
logging.error("Bad RevInfo link type: %s", rev_info.p.linkType)
logging.info("Removed %d UP- %d CORE- and %d DOWN-Segments." %
(removed_up, removed_core, removed_down))
def _remove_revoked_pcbs(self, db, rev_info):
"""
Removes all segments from 'db' that have a revoked upstream PCBMarking.
:param db: The PathSegmentDB.
:type db: :class:`lib.path_db.PathSegmentDB`
:param rev_info: The revocation info
:type rev_info: RevocationInfo
:returns: The number of deletions.
:rtype: int
"""
to_remove = []
for segment in db(full=True):
for asm in segment.iter_asms():
if self._check_revocation_for_asm(rev_info, asm, verify_all=False):
logging.debug("Removing segment: %s" % segment.short_desc())
to_remove.append(segment.get_hops_hash())
return db.delete_all(to_remove)
def _flush_path_dbs(self):
self.core_segments.flush()
self.down_segments.flush()
self.up_segments.flush()
def get_paths(self, dst_ia, flags=(), flush=False):
"""Return a list of paths."""
logging.debug("Paths requested for ISDAS=%s, flags=%s, flush=%s",
dst_ia, flags, flush)
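        # Strategy: serve from the local path DBs when possible; otherwise
        # request the missing segments from the path server and block for up
        # to PATH_REQ_TOUT until a reply arrives or the wait times out.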
if flush:
logging.info("Flushing PathDBs.")
self._flush_path_dbs()
if self.addr.isd_as == dst_ia or (
self.addr.isd_as.any_as() == dst_ia and
self.topology.is_core_as):
# Either the destination is the local AS, or the destination is any
# core AS in this ISD, and the local AS is in the core
empty = SCIONPath()
exp_time = int(time.time()) + self.EMPTY_PATH_TTL
empty_meta = FwdPathMeta.from_values(empty, [], self.topology.mtu, exp_time)
return [empty_meta], SCIONDPathReplyError.OK
paths = self.path_resolution(dst_ia, flags=flags)
if not paths:
key = dst_ia, flags
with self.req_path_lock:
r = self.requested_paths.get(key)
if r is None:
# No previous outstanding request
req = PathSegmentReq.from_values(self.addr.isd_as, dst_ia, flags=flags)
r = RequestState(req.copy())
self.requested_paths[key] = r
self._fetch_segments(req)
# Wait until event gets set.
timeout = not r.e.wait(PATH_REQ_TOUT)
with self.req_path_lock:
if timeout:
r.done()
if key in self.requested_paths:
del self.requested_paths[key]
if timeout:
logging.error("Query timed out for %s", dst_ia)
return [], SCIONDPathReplyError.PS_TIMEOUT
# Check if we can fulfill the path request.
paths = self.path_resolution(dst_ia, flags=flags)
if not paths:
logging.error("No paths found for %s", dst_ia)
return [], SCIONDPathReplyError.NO_PATHS
return paths, SCIONDPathReplyError.OK
def path_resolution(self, dst_ia, flags=()):
# dst as == 0 means any core AS in the specified ISD.
dst_is_core = self.is_core_as(dst_ia) or dst_ia[1] == 0
sibra = PATH_FLAG_SIBRA in flags
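        # Dispatch on whether the local AS and the destination are core ASes;
        # each branch combines a different mix of up-, core- and down-segments.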
if self.topology.is_core_as:
if dst_is_core:
ret = self._resolve_core_core(dst_ia, sibra=sibra)
else:
ret = self._resolve_core_not_core(dst_ia, sibra=sibra)
elif dst_is_core:
ret = self._resolve_not_core_core(dst_ia, sibra=sibra)
elif sibra:
ret = self._resolve_not_core_not_core_sibra(dst_ia)
else:
ret = self._resolve_not_core_not_core_scion(dst_ia)
if not sibra:
return ret
# FIXME(kormat): Strip off PCBs, and just return sibra reservation
# blocks
return self._sibra_strip_pcbs(self._strip_nones(ret))
def _resolve_core_core(self, dst_ia, sibra=False):
"""Resolve path from core to core."""
res = set()
for cseg in self.core_segments(last_ia=self.addr.isd_as, sibra=sibra,
**dst_ia.params()):
res.add((None, cseg, None))
if sibra:
return res
return tuples_to_full_paths(res)
def _resolve_core_not_core(self, dst_ia, sibra=False):
"""Resolve path from core to non-core."""
res = set()
# First check whether there is a direct path.
for dseg in self.down_segments(
first_ia=self.addr.isd_as, last_ia=dst_ia, sibra=sibra):
res.add((None, None, dseg))
# Check core-down combination.
for dseg in self.down_segments(last_ia=dst_ia, sibra=sibra):
dseg_ia = dseg.first_ia()
if self.addr.isd_as == dseg_ia:
pass<|fim▁hole|> if sibra:
return res
return tuples_to_full_paths(res)
def _resolve_not_core_core(self, dst_ia, sibra=False):
"""Resolve path from non-core to core."""
res = set()
params = dst_ia.params()
params["sibra"] = sibra
if dst_ia[0] == self.addr.isd_as[0]:
# Dst in local ISD. First check whether DST is a (super)-parent.
for useg in self.up_segments(**params):
res.add((useg, None, None))
# Check whether dst is known core AS.
for cseg in self.core_segments(**params):
            # Check whether we have an up-seg that connects to the core seg.
for useg in self.up_segments(first_ia=cseg.last_ia(), sibra=sibra):
res.add((useg, cseg, None))
if sibra:
return res
return tuples_to_full_paths(res)
def _resolve_not_core_not_core_scion(self, dst_ia):
"""Resolve SCION path from non-core to non-core."""
up_segs = self.up_segments()
down_segs = self.down_segments(last_ia=dst_ia)
core_segs = self._calc_core_segs(dst_ia[0], up_segs, down_segs)
full_paths = build_shortcut_paths(
up_segs, down_segs, self.rev_cache)
tuples = []
for up_seg in up_segs:
for down_seg in down_segs:
tuples.append((up_seg, None, down_seg))
for core_seg in core_segs:
tuples.append((up_seg, core_seg, down_seg))
full_paths.extend(tuples_to_full_paths(tuples))
return full_paths
def _resolve_not_core_not_core_sibra(self, dst_ia):
"""Resolve SIBRA path from non-core to non-core."""
res = set()
up_segs = set(self.up_segments(sibra=True))
down_segs = set(self.down_segments(last_ia=dst_ia, sibra=True))
for up_seg, down_seg in product(up_segs, down_segs):
src_core_ia = up_seg.first_ia()
dst_core_ia = down_seg.first_ia()
if src_core_ia == dst_core_ia:
res.add((up_seg, down_seg))
continue
for core_seg in self.core_segments(first_ia=dst_core_ia,
last_ia=src_core_ia, sibra=True):
res.add((up_seg, core_seg, down_seg))
return res
def _strip_nones(self, set_):
"""Strip None entries from a set of tuples"""
res = []
for tup in set_:
res.append(tuple(filter(None, tup)))
return res
def _sibra_strip_pcbs(self, paths):
ret = []
for pcbs in paths:
resvs = []
for pcb in pcbs:
resvs.append(self._sibra_strip_pcb(pcb))
ret.append(resvs)
return ret
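    # Reduces one SIBRA PCB to (reservation id, reservation block, interface
    # list). For up-segments the ASMs are iterated in reverse so each SOF is
    # paired with the ASM it belongs to.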
def _sibra_strip_pcb(self, pcb):
assert pcb.is_sibra()
pcb_ext = pcb.sibra_ext
resv_info = pcb_ext.info
resv = ResvBlockSteady.from_values(resv_info, pcb.get_n_hops())
asms = pcb.iter_asms()
if pcb_ext.p.up:
asms = reversed(list(asms))
iflist = []
for sof, asm in zip(pcb_ext.iter_sofs(), asms):
resv.sofs.append(sof)
iflist.extend(self._sibra_add_ifs(
asm.isd_as(), sof, resv_info.fwd_dir))
assert resv.num_hops == len(resv.sofs)
return pcb_ext.p.id, resv, iflist
def _sibra_add_ifs(self, isd_as, sof, fwd):
def _add(ifid):
if ifid:
ret.append((isd_as, ifid))
ret = []
if fwd:
_add(sof.ingress)
_add(sof.egress)
else:
_add(sof.egress)
_add(sof.ingress)
return ret
def _wait_for_events(self, events, deadline):
"""
Wait on a set of events, but only until the specified deadline. Returns
the number of events that happened while waiting.
"""
count = 0
for e in events:
if e.wait(max(0, deadline - SCIONTime.get_time())):
count += 1
return count
def _fetch_segments(self, req):
"""
        Send the given path segment request to the local path server.
"""
try:
addr, port = self.dns_query_topo(ServiceType.PS)[0]
except SCIONServiceLookupError:
log_exception("Error querying path service:")
return
req_id = mk_ctrl_req_id()
logging.debug("Sending path request (%s) to [%s]:%s [id: %016x]",
req.short_desc(), addr, port, req_id)
meta = self._build_meta(host=addr, port=port)
self.send_meta(CtrlPayload(PathMgmt(req), req_id=req_id), meta)
def _calc_core_segs(self, dst_isd, up_segs, down_segs):
"""
        Calculate all possible core segments joining the provided up and down
        segments. Returns a list of all core segments known to connect the
        resulting core-AS pairs.
"""
src_core_ases = set()
dst_core_ases = set()
for seg in up_segs:
src_core_ases.add(seg.first_ia()[1])
for seg in down_segs:
dst_core_ases.add(seg.first_ia()[1])
# Generate all possible AS pairs
as_pairs = list(product(src_core_ases, dst_core_ases))
return self._find_core_segs(self.addr.isd_as[0], dst_isd, as_pairs)
def _find_core_segs(self, src_isd, dst_isd, as_pairs):
"""
Given a set of AS pairs across 2 ISDs, return the core segments
connecting those pairs
"""
core_segs = []
for src_core_as, dst_core_as in as_pairs:
src_ia = ISD_AS.from_values(src_isd, src_core_as)
dst_ia = ISD_AS.from_values(dst_isd, dst_core_as)
if src_ia == dst_ia:
continue
seg = self.core_segments(first_ia=dst_ia, last_ia=src_ia)
if seg:
core_segs.extend(seg)
return core_segs
def run(self):
"""
Run an instance of the SCION daemon.
"""
threading.Thread(
target=thread_safety_net, args=(self._check_trc_cert_reqs,),
name="Elem.check_trc_cert_reqs", daemon=True).start()
super().run()<|fim▁end|>
|
for cseg in self.core_segments(
first_ia=dseg_ia, last_ia=self.addr.isd_as, sibra=sibra):
res.add((None, cseg, dseg))
|
<|file_name|>dev_settings.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from .common_settings import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dz(#w(lfve24ck!!yrt3l7$jfdoj+fgf+ru@w)!^gn9aq$s+&y'
<|fim▁hole|> 'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}<|fim▁end|>
|
DATABASES = {
'default': {
|
<|file_name|>IMeasureableEntity.java<|end_file_name|><|fim▁begin|><|fim▁hole|>package com.github.scaronthesky.eternalwinterwars.view.entities;
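/**
 * Contract for view entities that expose a measurable width and height
 * (presumably in the scene units used by the rendering layer).
 */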
public interface IMeasureableEntity {
public float getWidth();
public float getHeight();
}<|fim▁end|>
| |
<|file_name|>out2_sup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated by generateDS.py.
#
import sys
import getopt
import re as re_
import base64
import datetime as datetime_
etree_ = None
Verbose_import_ = False
(
XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
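        # Minimal fixed-offset tzinfo used when parsing ISO-8601 values that
        # carry a numeric UTC offset or a trailing 'Z'.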
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
                    int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class<|fim▁hole|> if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.iteritems()))
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
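# Resolves an attribute by plain or prefixed name; a 'prefix:name' lookup maps
# the prefix through the node's nsmap (only available with the lxml parser).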
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (
msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
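# Container for one chunk of mixed XML content (raw text, a simple-typed child
# or a complex child element), tagged with the category/type constants below.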
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name, pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class people(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('comments', 'comments', 1),
MemberSpec_('person', 'person', 1),
MemberSpec_('programmer', 'programmer', 1),
MemberSpec_('python_programmer', 'python-programmer', 1),
MemberSpec_('java_programmer', 'java-programmer', 1),
]
subclass = None
superclass = None
def __init__(self, comments=None, person=None, programmer=None, python_programmer=None, java_programmer=None):
if comments is None:
self.comments = []
else:
self.comments = comments
if person is None:
self.person = []
else:
self.person = person
if programmer is None:
self.programmer = []
else:
self.programmer = programmer
if python_programmer is None:
self.python_programmer = []
else:
self.python_programmer = python_programmer
if java_programmer is None:
self.java_programmer = []
else:
self.java_programmer = java_programmer
def factory(*args_, **kwargs_):
if people.subclass:
return people.subclass(*args_, **kwargs_)
else:
return people(*args_, **kwargs_)
factory = staticmethod(factory)
def get_comments(self): return self.comments
def set_comments(self, comments): self.comments = comments
def add_comments(self, value): self.comments.append(value)
def insert_comments(self, index, value): self.comments[index] = value
def get_person(self): return self.person
def set_person(self, person): self.person = person
def add_person(self, value): self.person.append(value)
def insert_person(self, index, value): self.person[index] = value
def get_programmer(self): return self.programmer
def set_programmer(self, programmer): self.programmer = programmer
def add_programmer(self, value): self.programmer.append(value)
def insert_programmer(self, index, value): self.programmer[index] = value
def get_python_programmer(self): return self.python_programmer
def set_python_programmer(self, python_programmer): self.python_programmer = python_programmer
def add_python_programmer(self, value): self.python_programmer.append(value)
def insert_python_programmer(self, index, value): self.python_programmer[index] = value
def get_java_programmer(self): return self.java_programmer
def set_java_programmer(self, java_programmer): self.java_programmer = java_programmer
def add_java_programmer(self, value): self.java_programmer.append(value)
def insert_java_programmer(self, index, value): self.java_programmer[index] = value
def hasContent_(self):
if (
self.comments or
self.person or
self.programmer or
self.python_programmer or
self.java_programmer
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='people', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='people')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='people'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='people', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for comments_ in self.comments:
comments_.export(outfile, level, namespace_, name_='comments', pretty_print=pretty_print)
for person_ in self.person:
person_.export(outfile, level, namespace_, name_='person', pretty_print=pretty_print)
for programmer_ in self.programmer:
programmer_.export(outfile, level, namespace_, name_='programmer', pretty_print=pretty_print)
for python_programmer_ in self.python_programmer:
python_programmer_.export(outfile, level, namespace_, name_='python-programmer', pretty_print=pretty_print)
for java_programmer_ in self.java_programmer:
java_programmer_.export(outfile, level, namespace_, name_='java-programmer', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='people'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('comments=[\n')
level += 1
for comments_ in self.comments:
showIndent(outfile, level)
outfile.write('model_.comments(\n')
comments_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('person=[\n')
level += 1
for person_ in self.person:
showIndent(outfile, level)
outfile.write('model_.person(\n')
person_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('programmer=[\n')
level += 1
for programmer_ in self.programmer:
showIndent(outfile, level)
outfile.write('model_.programmer(\n')
programmer_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('python_programmer=[\n')
level += 1
for python_programmer_ in self.python_programmer:
showIndent(outfile, level)
outfile.write('model_.python_programmer(\n')
python_programmer_.exportLiteral(outfile, level, name_='python-programmer')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('java_programmer=[\n')
level += 1
for java_programmer_ in self.java_programmer:
showIndent(outfile, level)
outfile.write('model_.java_programmer(\n')
java_programmer_.exportLiteral(outfile, level, name_='java-programmer')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'comments':
obj_ = comments.factory()
obj_.build(child_)
self.comments.append(obj_)
elif nodeName_ == 'person':
class_obj_ = self.get_class_obj_(child_, person)
obj_ = class_obj_.factory()
obj_.build(child_)
self.person.append(obj_)
elif nodeName_ == 'programmer':
class_obj_ = self.get_class_obj_(child_, programmer)
obj_ = class_obj_.factory()
obj_.build(child_)
self.programmer.append(obj_)
elif nodeName_ == 'python-programmer':
obj_ = python_programmer.factory()
obj_.build(child_)
self.python_programmer.append(obj_)
elif nodeName_ == 'java-programmer':
obj_ = java_programmer.factory()
obj_.build(child_)
self.java_programmer.append(obj_)
def walk_and_update(self):
        members = people.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if people.superclass is not None:
people.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: people depth: %d' % (counter, depth, )
        members = people.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
def set_up(self):
global types, counter
import types as types_module
types = types_module
counter = 0
# end class people
class comments(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('emp', 'xs:string', 1),
MemberSpec_('valueOf_', [], 0),
]
subclass = None
superclass = None
def __init__(self, emp=None, valueOf_=None, mixedclass_=None, content_=None):
if emp is None:
self.emp = []
else:
self.emp = emp
self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if comments.subclass:
return comments.subclass(*args_, **kwargs_)
else:
return comments(*args_, **kwargs_)
factory = staticmethod(factory)
def get_emp(self): return self.emp
def set_emp(self, emp): self.emp = emp
def add_emp(self, value): self.emp.append(value)
def insert_emp(self, index, value): self.emp[index] = value
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.emp or
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='comments', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='comments')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='comments'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='comments', fromsubclass_=False, pretty_print=True):
if not fromsubclass_:
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='comments'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
for item_ in self.content_:
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
if node.text is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', node.text)
self.content_.append(obj_)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'emp' and child_.text is not None:
valuestr_ = child_.text
obj_ = self.mixedclass_(MixedContainer.CategorySimple,
MixedContainer.TypeString, 'emp', valuestr_)
self.content_.append(obj_)
if not fromsubclass_ and child_.tail is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.tail)
self.content_.append(obj_)
def walk_and_update(self):
        members = comments.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if comments.superclass is not None:
comments.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: comments depth: %d' % (counter, depth, )
        members = comments.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class comments
class person(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('vegetable', 'xs:string', 0),
MemberSpec_('fruit', 'xs:string', 0),
MemberSpec_('ratio', 'xs:float', 0),
MemberSpec_('id', 'xs:integer', 0),
MemberSpec_('value', 'xs:string', 0),
MemberSpec_('name', 'xs:string', 0),
MemberSpec_('interest', 'xs:string', 1),
MemberSpec_('category', 'xs:integer', 0),
MemberSpec_('agent', 'agent', 1),
MemberSpec_('promoter', 'booster', 1),
MemberSpec_('description', 'xs:string', 0),
]
subclass = None
superclass = None
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, extensiontype_=None):
self.vegetable = _cast(None, vegetable)
self.fruit = _cast(None, fruit)
self.ratio = _cast(float, ratio)
self.id = _cast(int, id)
self.value = _cast(None, value)
self.name = name
if interest is None:
self.interest = []
else:
self.interest = interest
self.category = category
if agent is None:
self.agent = []
else:
self.agent = agent
if promoter is None:
self.promoter = []
else:
self.promoter = promoter
self.description = description
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if person.subclass:
return person.subclass(*args_, **kwargs_)
else:
return person(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_interest(self): return self.interest
def set_interest(self, interest): self.interest = interest
def add_interest(self, value): self.interest.append(value)
def insert_interest(self, index, value): self.interest[index] = value
def get_category(self): return self.category
def set_category(self, category): self.category = category
def get_agent(self): return self.agent
def set_agent(self, agent): self.agent = agent
def add_agent(self, value): self.agent.append(value)
    def insert_agent(self, index, value): self.agent.insert(index, value)
def get_promoter(self): return self.promoter
def set_promoter(self, promoter): self.promoter = promoter
def add_promoter(self, value): self.promoter.append(value)
    def insert_promoter(self, index, value): self.promoter.insert(index, value)
def get_description(self): return self.description
def set_description(self, description): self.description = description
def get_vegetable(self): return self.vegetable
def set_vegetable(self, vegetable): self.vegetable = vegetable
def get_fruit(self): return self.fruit
def set_fruit(self, fruit): self.fruit = fruit
def get_ratio(self): return self.ratio
def set_ratio(self, ratio): self.ratio = ratio
def get_id(self): return self.id
def set_id(self, id): self.id = id
def get_value(self): return self.value
def set_value(self, value): self.value = value
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
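        """Return True if any child element content is present."""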
if (
self.name is not None or
self.interest or
self.category is not None or
self.agent or
self.promoter or
self.description is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='person', namespacedef_='', pretty_print=True):
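        """Write this instance to outfile as an XML element named name_."""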
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='person')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='person'):
if self.vegetable is not None and 'vegetable' not in already_processed:
already_processed.add('vegetable')
outfile.write(' vegetable=%s' % (self.gds_format_string(quote_attrib(self.vegetable).encode(ExternalEncoding), input_name='vegetable'), ))
if self.fruit is not None and 'fruit' not in already_processed:
already_processed.add('fruit')
outfile.write(' fruit=%s' % (self.gds_format_string(quote_attrib(self.fruit).encode(ExternalEncoding), input_name='fruit'), ))
if self.ratio is not None and 'ratio' not in already_processed:
already_processed.add('ratio')
outfile.write(' ratio="%s"' % self.gds_format_float(self.ratio, input_name='ratio'))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='person', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sname>%s</%sname>%s' % (namespace_, self.gds_format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_, eol_))
for interest_ in self.interest:
showIndent(outfile, level, pretty_print)
outfile.write('<%sinterest>%s</%sinterest>%s' % (namespace_, self.gds_format_string(quote_xml(interest_).encode(ExternalEncoding), input_name='interest'), namespace_, eol_))
if self.category is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scategory>%s</%scategory>%s' % (namespace_, self.gds_format_integer(self.category, input_name='category'), namespace_, eol_))
for agent_ in self.agent:
agent_.export(outfile, level, namespace_, name_='agent', pretty_print=pretty_print)
for promoter_ in self.promoter:
promoter_.export(outfile, level, namespace_, name_='promoter', pretty_print=pretty_print)
if self.description is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdescription>%s</%sdescription>%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='person'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.vegetable is not None and 'vegetable' not in already_processed:
already_processed.add('vegetable')
showIndent(outfile, level)
outfile.write('vegetable="%s",\n' % (self.vegetable,))
if self.fruit is not None and 'fruit' not in already_processed:
already_processed.add('fruit')
showIndent(outfile, level)
outfile.write('fruit="%s",\n' % (self.fruit,))
if self.ratio is not None and 'ratio' not in already_processed:
already_processed.add('ratio')
showIndent(outfile, level)
outfile.write('ratio=%f,\n' % (self.ratio,))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
showIndent(outfile, level)
outfile.write('id=%d,\n' % (self.id,))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
showIndent(outfile, level)
outfile.write('value="%s",\n' % (self.value,))
def exportLiteralChildren(self, outfile, level, name_):
if self.name is not None:
showIndent(outfile, level)
outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('interest=[\n')
level += 1
for interest_ in self.interest:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(interest_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.category is not None:
showIndent(outfile, level)
outfile.write('category=%d,\n' % self.category)
showIndent(outfile, level)
outfile.write('agent=[\n')
level += 1
for agent_ in self.agent:
showIndent(outfile, level)
outfile.write('model_.agent(\n')
agent_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('promoter=[\n')
level += 1
for promoter_ in self.promoter:
showIndent(outfile, level)
outfile.write('model_.booster(\n')
promoter_.exportLiteral(outfile, level, name_='booster')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.description is not None:
showIndent(outfile, level)
outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding))
def build(self, node):
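        """Populate this instance from an ElementTree-style node and
        return self."""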
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
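        """Set members from the node's XML attributes, with type checks."""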
value = find_attr_value_('vegetable', node)
if value is not None and 'vegetable' not in already_processed:
already_processed.add('vegetable')
self.vegetable = value
value = find_attr_value_('fruit', node)
if value is not None and 'fruit' not in already_processed:
already_processed.add('fruit')
self.fruit = value
value = find_attr_value_('ratio', node)
if value is not None and 'ratio' not in already_processed:
already_processed.add('ratio')
try:
self.ratio = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (ratio): %s' % exp)
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'name':
name_ = child_.text
name_ = self.gds_validate_string(name_, node, 'name')
self.name = name_
elif nodeName_ == 'interest':
interest_ = child_.text
interest_ = self.gds_validate_string(interest_, node, 'interest')
self.interest.append(interest_)
elif nodeName_ == 'category':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'category')
self.category = ival_
elif nodeName_ == 'agent':
obj_ = agent.factory()
obj_.build(child_)
self.agent.append(obj_)
elif nodeName_ == 'promoter':
obj_ = booster.factory()
obj_.build(child_)
self.promoter.append(obj_)
elif nodeName_ == 'description':
description_ = child_.text
description_ = self.gds_validate_string(description_, node, 'description')
self.description = description_
def walk_and_update(self):
        members = person.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if person.superclass is not None:
person.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: person depth: %d' % (counter, depth, )
        members = person.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class person
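# Minimal usage sketch (hypothetical): given an ElementTree element `node`
# for a <person> and `sys` imported at module level, a round trip would be:
#
#     obj = person.factory()
#     obj.build(node)
#     obj.export(sys.stdout, 0, name_='person')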
class programmer(person):
member_data_items_ = [
MemberSpec_('language', 'xs:string', 0),
MemberSpec_('area', 'xs:string', 0),
MemberSpec_('attrnegint', 'xs:negativeInteger', 0),
MemberSpec_('attrposint', 'xs:positiveInteger', 0),
MemberSpec_('attrnonnegint', 'xs:nonNegativeInteger', 0),
MemberSpec_('attrnonposint', 'xs:nonPositiveInteger', 0),
MemberSpec_('email', 'xs:string', 0),
MemberSpec_('elposint', 'xs:positiveInteger', 0),
MemberSpec_('elnonposint', 'xs:nonPositiveInteger', 0),
MemberSpec_('elnegint', 'xs:negativeInteger', 0),
MemberSpec_('elnonnegint', 'xs:nonNegativeInteger', 0),
MemberSpec_('eldate', 'xs:date', 0),
MemberSpec_('eldatetime', 'xs:dateTime', 0),
MemberSpec_('eltoken', 'xs:token', 0),
MemberSpec_('elshort', 'xs:short', 0),
MemberSpec_('ellong', 'xs:long', 0),
MemberSpec_('elparam', 'param', 0),
MemberSpec_('elarraytypes', ['ArrayTypes', 'xs:NMTOKEN'], 0),
]
subclass = None
superclass = person
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eldatetime=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, extensiontype_=None):
super(programmer, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, extensiontype_, )
self.language = _cast(None, language)
self.area = _cast(None, area)
self.attrnegint = _cast(int, attrnegint)
self.attrposint = _cast(int, attrposint)
self.attrnonnegint = _cast(int, attrnonnegint)
self.attrnonposint = _cast(int, attrnonposint)
self.email = email
self.elposint = elposint
self.elnonposint = elnonposint
self.elnegint = elnegint
self.elnonnegint = elnonnegint
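        # Date/datetime members accept either an ISO-formatted string or an
        # already-constructed date/datetime object.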
if isinstance(eldate, basestring):
initvalue_ = datetime_.datetime.strptime(eldate, '%Y-%m-%d').date()
else:
initvalue_ = eldate
self.eldate = initvalue_
if isinstance(eldatetime, basestring):
initvalue_ = datetime_.datetime.strptime(eldatetime, '%Y-%m-%dT%H:%M:%S')
else:
initvalue_ = eldatetime
self.eldatetime = initvalue_
self.eltoken = eltoken
self.elshort = elshort
self.ellong = ellong
self.elparam = elparam
self.elarraytypes = elarraytypes
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if programmer.subclass:
return programmer.subclass(*args_, **kwargs_)
else:
return programmer(*args_, **kwargs_)
factory = staticmethod(factory)
def get_email(self): return self.email
def set_email(self, email): self.email = email
def get_elposint(self): return self.elposint
def set_elposint(self, elposint): self.elposint = elposint
def get_elnonposint(self): return self.elnonposint
def set_elnonposint(self, elnonposint): self.elnonposint = elnonposint
def get_elnegint(self): return self.elnegint
def set_elnegint(self, elnegint): self.elnegint = elnegint
def get_elnonnegint(self): return self.elnonnegint
def set_elnonnegint(self, elnonnegint): self.elnonnegint = elnonnegint
def get_eldate(self): return self.eldate
def set_eldate(self, eldate): self.eldate = eldate
def get_eldatetime(self): return self.eldatetime
def set_eldatetime(self, eldatetime): self.eldatetime = eldatetime
def get_eltoken(self): return self.eltoken
def set_eltoken(self, eltoken): self.eltoken = eltoken
def get_elshort(self): return self.elshort
def set_elshort(self, elshort): self.elshort = elshort
def get_ellong(self): return self.ellong
def set_ellong(self, ellong): self.ellong = ellong
def get_elparam(self): return self.elparam
def set_elparam(self, elparam): self.elparam = elparam
def get_elarraytypes(self): return self.elarraytypes
def set_elarraytypes(self, elarraytypes): self.elarraytypes = elarraytypes
def get_language(self): return self.language
def set_language(self, language): self.language = language
def get_area(self): return self.area
def set_area(self, area): self.area = area
def get_attrnegint(self): return self.attrnegint
def set_attrnegint(self, attrnegint): self.attrnegint = attrnegint
def get_attrposint(self): return self.attrposint
def set_attrposint(self, attrposint): self.attrposint = attrposint
def get_attrnonnegint(self): return self.attrnonnegint
def set_attrnonnegint(self, attrnonnegint): self.attrnonnegint = attrnonnegint
def get_attrnonposint(self): return self.attrnonposint
def set_attrnonposint(self, attrnonposint): self.attrnonposint = attrnonposint
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def validate_ArrayTypes(self, value):
# Validate type ArrayTypes, a restriction on xs:NMTOKEN.
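        # Empty validation hook; enumeration checks for ArrayTypes could be
        # added here.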
pass
def hasContent_(self):
if (
self.email is not None or
self.elposint is not None or
self.elnonposint is not None or
self.elnegint is not None or
self.elnonnegint is not None or
self.eldate is not None or
self.eldatetime is not None or
self.eltoken is not None or
self.elshort is not None or
self.ellong is not None or
self.elparam is not None or
self.elarraytypes is not None or
super(programmer, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='programmer', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='programmer')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='programmer'):
super(programmer, self).exportAttributes(outfile, level, already_processed, namespace_, name_='programmer')
if self.language is not None and 'language' not in already_processed:
already_processed.add('language')
outfile.write(' language=%s' % (self.gds_format_string(quote_attrib(self.language).encode(ExternalEncoding), input_name='language'), ))
if self.area is not None and 'area' not in already_processed:
already_processed.add('area')
outfile.write(' area=%s' % (self.gds_format_string(quote_attrib(self.area).encode(ExternalEncoding), input_name='area'), ))
if self.attrnegint is not None and 'attrnegint' not in already_processed:
already_processed.add('attrnegint')
outfile.write(' attrnegint="%s"' % self.gds_format_integer(self.attrnegint, input_name='attrnegint'))
if self.attrposint is not None and 'attrposint' not in already_processed:
already_processed.add('attrposint')
outfile.write(' attrposint="%s"' % self.gds_format_integer(self.attrposint, input_name='attrposint'))
if self.attrnonnegint is not None and 'attrnonnegint' not in already_processed:
already_processed.add('attrnonnegint')
outfile.write(' attrnonnegint="%s"' % self.gds_format_integer(self.attrnonnegint, input_name='attrnonnegint'))
if self.attrnonposint is not None and 'attrnonposint' not in already_processed:
already_processed.add('attrnonposint')
outfile.write(' attrnonposint="%s"' % self.gds_format_integer(self.attrnonposint, input_name='attrnonposint'))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='programmer', fromsubclass_=False, pretty_print=True):
super(programmer, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.email is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%semail>%s</%semail>%s' % (namespace_, self.gds_format_string(quote_xml(self.email).encode(ExternalEncoding), input_name='email'), namespace_, eol_))
if self.elposint is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%selposint>%s</%selposint>%s' % (namespace_, self.gds_format_integer(self.elposint, input_name='elposint'), namespace_, eol_))
if self.elnonposint is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%selnonposint>%s</%selnonposint>%s' % (namespace_, self.gds_format_integer(self.elnonposint, input_name='elnonposint'), namespace_, eol_))
if self.elnegint is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%selnegint>%s</%selnegint>%s' % (namespace_, self.gds_format_integer(self.elnegint, input_name='elnegint'), namespace_, eol_))
if self.elnonnegint is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%selnonnegint>%s</%selnonnegint>%s' % (namespace_, self.gds_format_integer(self.elnonnegint, input_name='elnonnegint'), namespace_, eol_))
if self.eldate is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%seldate>%s</%seldate>%s' % (namespace_, self.gds_format_date(self.eldate, input_name='eldate'), namespace_, eol_))
if self.eldatetime is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%seldatetime>%s</%seldatetime>%s' % (namespace_, self.gds_format_datetime(self.eldatetime, input_name='eldatetime'), namespace_, eol_))
if self.eltoken is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%seltoken>%s</%seltoken>%s' % (namespace_, self.gds_format_string(quote_xml(self.eltoken).encode(ExternalEncoding), input_name='eltoken'), namespace_, eol_))
if self.elshort is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%selshort>%s</%selshort>%s' % (namespace_, self.gds_format_integer(self.elshort, input_name='elshort'), namespace_, eol_))
if self.ellong is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sellong>%s</%sellong>%s' % (namespace_, self.gds_format_integer(self.ellong, input_name='ellong'), namespace_, eol_))
if self.elparam is not None:
self.elparam.export(outfile, level, namespace_, name_='elparam', pretty_print=pretty_print)
if self.elarraytypes is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%selarraytypes>%s</%selarraytypes>%s' % (namespace_, self.gds_format_string(quote_xml(self.elarraytypes).encode(ExternalEncoding), input_name='elarraytypes'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='programmer'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.language is not None and 'language' not in already_processed:
already_processed.add('language')
showIndent(outfile, level)
outfile.write('language="%s",\n' % (self.language,))
if self.area is not None and 'area' not in already_processed:
already_processed.add('area')
showIndent(outfile, level)
outfile.write('area="%s",\n' % (self.area,))
if self.attrnegint is not None and 'attrnegint' not in already_processed:
already_processed.add('attrnegint')
showIndent(outfile, level)
outfile.write('attrnegint=%d,\n' % (self.attrnegint,))
if self.attrposint is not None and 'attrposint' not in already_processed:
already_processed.add('attrposint')
showIndent(outfile, level)
outfile.write('attrposint=%d,\n' % (self.attrposint,))
if self.attrnonnegint is not None and 'attrnonnegint' not in already_processed:
already_processed.add('attrnonnegint')
showIndent(outfile, level)
outfile.write('attrnonnegint=%d,\n' % (self.attrnonnegint,))
if self.attrnonposint is not None and 'attrnonposint' not in already_processed:
already_processed.add('attrnonposint')
showIndent(outfile, level)
outfile.write('attrnonposint=%d,\n' % (self.attrnonposint,))
super(programmer, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(programmer, self).exportLiteralChildren(outfile, level, name_)
if self.email is not None:
showIndent(outfile, level)
outfile.write('email=%s,\n' % quote_python(self.email).encode(ExternalEncoding))
if self.elposint is not None:
showIndent(outfile, level)
outfile.write('elposint=%d,\n' % self.elposint)
if self.elnonposint is not None:
showIndent(outfile, level)
outfile.write('elnonposint=%d,\n' % self.elnonposint)
if self.elnegint is not None:
showIndent(outfile, level)
outfile.write('elnegint=%d,\n' % self.elnegint)
if self.elnonnegint is not None:
showIndent(outfile, level)
outfile.write('elnonnegint=%d,\n' % self.elnonnegint)
if self.eldate is not None:
showIndent(outfile, level)
outfile.write('eldate=model_.GeneratedsSuper.gds_parse_date("%s"),\n' % self.gds_format_date(self.eldate, input_name='eldate'))
if self.eldatetime is not None:
showIndent(outfile, level)
outfile.write('eldatetime=model_.GeneratedsSuper.gds_parse_datetime("%s"),\n' % self.gds_format_datetime(self.eldatetime, input_name='eldatetime'))
if self.eltoken is not None:
showIndent(outfile, level)
outfile.write('eltoken=%s,\n' % quote_python(self.eltoken).encode(ExternalEncoding))
if self.elshort is not None:
showIndent(outfile, level)
outfile.write('elshort=%d,\n' % self.elshort)
if self.ellong is not None:
showIndent(outfile, level)
outfile.write('ellong=%d,\n' % self.ellong)
if self.elparam is not None:
showIndent(outfile, level)
outfile.write('elparam=model_.param(\n')
self.elparam.exportLiteral(outfile, level, name_='elparam')
showIndent(outfile, level)
outfile.write('),\n')
if self.elarraytypes is not None:
showIndent(outfile, level)
outfile.write('elarraytypes=%s,\n' % quote_python(self.elarraytypes).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('language', node)
if value is not None and 'language' not in already_processed:
already_processed.add('language')
self.language = value
value = find_attr_value_('area', node)
if value is not None and 'area' not in already_processed:
already_processed.add('area')
self.area = value
value = find_attr_value_('attrnegint', node)
if value is not None and 'attrnegint' not in already_processed:
already_processed.add('attrnegint')
try:
self.attrnegint = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.attrnegint >= 0:
raise_parse_error(node, 'Invalid NegativeInteger')
value = find_attr_value_('attrposint', node)
if value is not None and 'attrposint' not in already_processed:
already_processed.add('attrposint')
try:
self.attrposint = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.attrposint <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
value = find_attr_value_('attrnonnegint', node)
if value is not None and 'attrnonnegint' not in already_processed:
already_processed.add('attrnonnegint')
try:
self.attrnonnegint = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.attrnonnegint < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
value = find_attr_value_('attrnonposint', node)
if value is not None and 'attrnonposint' not in already_processed:
already_processed.add('attrnonposint')
try:
self.attrnonposint = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.attrnonposint > 0:
raise_parse_error(node, 'Invalid NonPositiveInteger')
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(programmer, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'email':
email_ = child_.text
email_ = self.gds_validate_string(email_, node, 'email')
self.email = email_
elif nodeName_ == 'elposint':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'elposint')
self.elposint = ival_
elif nodeName_ == 'elnonposint':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ > 0:
raise_parse_error(child_, 'requires nonPositiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'elnonposint')
self.elnonposint = ival_
elif nodeName_ == 'elnegint':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ >= 0:
raise_parse_error(child_, 'requires negativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'elnegint')
self.elnegint = ival_
elif nodeName_ == 'elnonnegint':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ < 0:
raise_parse_error(child_, 'requires nonNegativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'elnonnegint')
self.elnonnegint = ival_
elif nodeName_ == 'eldate':
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.eldate = dval_
elif nodeName_ == 'eldatetime':
sval_ = child_.text
dval_ = self.gds_parse_datetime(sval_)
self.eldatetime = dval_
elif nodeName_ == 'eltoken':
eltoken_ = child_.text
eltoken_ = re_.sub(String_cleanup_pat_, " ", eltoken_).strip()
eltoken_ = self.gds_validate_string(eltoken_, node, 'eltoken')
self.eltoken = eltoken_
elif nodeName_ == 'elshort':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'elshort')
self.elshort = ival_
elif nodeName_ == 'ellong':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'ellong')
self.ellong = ival_
elif nodeName_ == 'elparam':
obj_ = param.factory()
obj_.build(child_)
self.elparam = obj_
elif nodeName_ == 'elarraytypes':
elarraytypes_ = child_.text
elarraytypes_ = self.gds_validate_string(elarraytypes_, node, 'elarraytypes')
self.elarraytypes = elarraytypes_
self.validate_ArrayTypes(self.elarraytypes) # validate type ArrayTypes
super(programmer, self).buildChildren(child_, node, nodeName_, True)
def walk_and_update(self):
        members = programmer.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if programmer.superclass is not None:
programmer.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: programmer depth: %d' % (counter, depth, )
        members = programmer.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class programmer
class param(GeneratedsSuper):
"""Finding flow attribute unneccesary in practice. A unnamed parameter
is unbound/skipped."""
member_data_items_ = [
MemberSpec_('semantic', 'xs:token', 0),
MemberSpec_('name', 'xs:NCName', 0),
MemberSpec_('flow', 'FlowType', 0),
MemberSpec_('sid', 'xs:NCName', 0),
MemberSpec_('type', 'xs:NMTOKEN', 0),
MemberSpec_('id', 'xs:string', 0),
MemberSpec_('valueOf_', 'xs:string', 0),
]
subclass = None
superclass = None
def __init__(self, semantic=None, name=None, flow=None, sid=None, type_=None, id=None, valueOf_=None):
self.semantic = _cast(None, semantic)
self.name = _cast(None, name)
self.flow = _cast(None, flow)
self.sid = _cast(None, sid)
self.type_ = _cast(None, type_)
self.id = _cast(None, id)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if param.subclass:
return param.subclass(*args_, **kwargs_)
else:
return param(*args_, **kwargs_)
factory = staticmethod(factory)
def get_semantic(self): return self.semantic
def set_semantic(self, semantic): self.semantic = semantic
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_flow(self): return self.flow
def set_flow(self, flow): self.flow = flow
def get_sid(self): return self.sid
def set_sid(self, sid): self.sid = sid
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_id(self): return self.id
def set_id(self, id): self.id = id
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='param', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='param')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='param'):
if self.semantic is not None and 'semantic' not in already_processed:
already_processed.add('semantic')
outfile.write(' semantic=%s' % (self.gds_format_string(quote_attrib(self.semantic).encode(ExternalEncoding), input_name='semantic'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (quote_attrib(self.name), ))
if self.flow is not None and 'flow' not in already_processed:
already_processed.add('flow')
outfile.write(' flow=%s' % (quote_attrib(self.flow), ))
if self.sid is not None and 'sid' not in already_processed:
already_processed.add('sid')
outfile.write(' sid=%s' % (quote_attrib(self.sid), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='param', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='param'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.semantic is not None and 'semantic' not in already_processed:
already_processed.add('semantic')
showIndent(outfile, level)
outfile.write('semantic="%s",\n' % (self.semantic,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
if self.flow is not None and 'flow' not in already_processed:
already_processed.add('flow')
showIndent(outfile, level)
outfile.write('flow=%s,\n' % (self.flow,))
if self.sid is not None and 'sid' not in already_processed:
already_processed.add('sid')
showIndent(outfile, level)
outfile.write('sid="%s",\n' % (self.sid,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
showIndent(outfile, level)
outfile.write('id="%s",\n' % (self.id,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
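        # param is a simple-content element: capture all of its text.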
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('semantic', node)
if value is not None and 'semantic' not in already_processed:
already_processed.add('semantic')
self.semantic = value
self.semantic = ' '.join(self.semantic.split())
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
value = find_attr_value_('flow', node)
if value is not None and 'flow' not in already_processed:
already_processed.add('flow')
self.flow = value
value = find_attr_value_('sid', node)
if value is not None and 'sid' not in already_processed:
already_processed.add('sid')
self.sid = value
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
self.id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
def walk_and_update(self):
        members = param.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if param.superclass is not None:
param.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: param depth: %d' % (counter, depth, )
        members = param.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class param
class python_programmer(programmer):
member_data_items_ = [
MemberSpec_('nick-name', 'xs:string', 0),
MemberSpec_('favorite_editor', 'xs:string', 0),
]
subclass = None
superclass = programmer
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eldatetime=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, nick_name=None, favorite_editor=None):
super(python_programmer, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eldatetime, eltoken, elshort, ellong, elparam, elarraytypes, )
self.nick_name = _cast(None, nick_name)
self.favorite_editor = favorite_editor
def factory(*args_, **kwargs_):
if python_programmer.subclass:
return python_programmer.subclass(*args_, **kwargs_)
else:
return python_programmer(*args_, **kwargs_)
factory = staticmethod(factory)
def get_favorite_editor(self): return self.favorite_editor
def set_favorite_editor(self, favorite_editor): self.favorite_editor = favorite_editor
def get_nick_name(self): return self.nick_name
def set_nick_name(self, nick_name): self.nick_name = nick_name
def hasContent_(self):
if (
self.favorite_editor is not None or
super(python_programmer, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='python-programmer', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='python-programmer')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='python-programmer'):
super(python_programmer, self).exportAttributes(outfile, level, already_processed, namespace_, name_='python-programmer')
if self.nick_name is not None and 'nick_name' not in already_processed:
already_processed.add('nick_name')
outfile.write(' nick-name=%s' % (self.gds_format_string(quote_attrib(self.nick_name).encode(ExternalEncoding), input_name='nick-name'), ))
def exportChildren(self, outfile, level, namespace_='', name_='python-programmer', fromsubclass_=False, pretty_print=True):
super(python_programmer, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.favorite_editor is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfavorite-editor>%s</%sfavorite-editor>%s' % (namespace_, self.gds_format_string(quote_xml(self.favorite_editor).encode(ExternalEncoding), input_name='favorite-editor'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='python-programmer'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.nick_name is not None and 'nick_name' not in already_processed:
already_processed.add('nick_name')
showIndent(outfile, level)
outfile.write('nick_name="%s",\n' % (self.nick_name,))
super(python_programmer, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(python_programmer, self).exportLiteralChildren(outfile, level, name_)
if self.favorite_editor is not None:
showIndent(outfile, level)
outfile.write('favorite_editor=%s,\n' % quote_python(self.favorite_editor).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('nick-name', node)
if value is not None and 'nick-name' not in already_processed:
already_processed.add('nick-name')
self.nick_name = value
super(python_programmer, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'favorite-editor':
favorite_editor_ = child_.text
favorite_editor_ = self.gds_validate_string(favorite_editor_, node, 'favorite_editor')
self.favorite_editor = favorite_editor_
super(python_programmer, self).buildChildren(child_, node, nodeName_, True)
def walk_and_update(self):
        members = python_programmer.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if python_programmer.superclass is not None:
python_programmer.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: python_programmer depth: %d' % (counter, depth, )
        members = python_programmer.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class python_programmer
class java_programmer(programmer):
member_data_items_ = [
MemberSpec_('status', 'xs:string', 0),
MemberSpec_('nick-name', 'xs:string', 0),
MemberSpec_('favorite_editor', 'xs:string', 0),
]
subclass = None
superclass = programmer
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eldatetime=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, status=None, nick_name=None, favorite_editor=None):
super(java_programmer, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eldatetime, eltoken, elshort, ellong, elparam, elarraytypes, )
self.status = _cast(None, status)
self.nick_name = _cast(None, nick_name)
self.favorite_editor = favorite_editor
def factory(*args_, **kwargs_):
if java_programmer.subclass:
return java_programmer.subclass(*args_, **kwargs_)
else:
return java_programmer(*args_, **kwargs_)
factory = staticmethod(factory)
def get_favorite_editor(self): return self.favorite_editor
def set_favorite_editor(self, favorite_editor): self.favorite_editor = favorite_editor
def get_status(self): return self.status
def set_status(self, status): self.status = status
def get_nick_name(self): return self.nick_name
def set_nick_name(self, nick_name): self.nick_name = nick_name
def hasContent_(self):
if (
self.favorite_editor is not None or
super(java_programmer, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='java-programmer', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='java-programmer')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='java-programmer'):
super(java_programmer, self).exportAttributes(outfile, level, already_processed, namespace_, name_='java-programmer')
if self.status is not None and 'status' not in already_processed:
already_processed.add('status')
outfile.write(' status=%s' % (self.gds_format_string(quote_attrib(self.status).encode(ExternalEncoding), input_name='status'), ))
if self.nick_name is not None and 'nick_name' not in already_processed:
already_processed.add('nick_name')
outfile.write(' nick-name=%s' % (self.gds_format_string(quote_attrib(self.nick_name).encode(ExternalEncoding), input_name='nick-name'), ))
def exportChildren(self, outfile, level, namespace_='', name_='java-programmer', fromsubclass_=False, pretty_print=True):
super(java_programmer, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.favorite_editor is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfavorite-editor>%s</%sfavorite-editor>%s' % (namespace_, self.gds_format_string(quote_xml(self.favorite_editor).encode(ExternalEncoding), input_name='favorite-editor'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='java-programmer'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.status is not None and 'status' not in already_processed:
already_processed.add('status')
showIndent(outfile, level)
outfile.write('status="%s",\n' % (self.status,))
if self.nick_name is not None and 'nick_name' not in already_processed:
already_processed.add('nick_name')
showIndent(outfile, level)
outfile.write('nick_name="%s",\n' % (self.nick_name,))
super(java_programmer, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(java_programmer, self).exportLiteralChildren(outfile, level, name_)
if self.favorite_editor is not None:
showIndent(outfile, level)
outfile.write('favorite_editor=%s,\n' % quote_python(self.favorite_editor).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('status', node)
if value is not None and 'status' not in already_processed:
already_processed.add('status')
self.status = value
value = find_attr_value_('nick-name', node)
if value is not None and 'nick-name' not in already_processed:
already_processed.add('nick-name')
self.nick_name = value
super(java_programmer, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'favorite-editor':
favorite_editor_ = child_.text
favorite_editor_ = self.gds_validate_string(favorite_editor_, node, 'favorite_editor')
self.favorite_editor = favorite_editor_
super(java_programmer, self).buildChildren(child_, node, nodeName_, True)
def walk_and_update(self):
        members = java_programmer.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if java_programmer.superclass is not None:
java_programmer.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: java_programmer depth: %d' % (counter, depth, )
        members = java_programmer.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class java_programmer
class agent(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('firstname', 'xs:string', 0),
MemberSpec_('lastname', 'xs:string', 0),
MemberSpec_('priority', 'xs:float', 0),
MemberSpec_('info', 'info', 0),
]
subclass = None
superclass = None
def __init__(self, firstname=None, lastname=None, priority=None, info=None):
self.firstname = firstname
self.lastname = lastname
self.priority = priority
self.info = info
def factory(*args_, **kwargs_):
if agent.subclass:
return agent.subclass(*args_, **kwargs_)
else:
return agent(*args_, **kwargs_)
factory = staticmethod(factory)
def get_firstname(self): return self.firstname
def set_firstname(self, firstname): self.firstname = firstname
def get_lastname(self): return self.lastname
def set_lastname(self, lastname): self.lastname = lastname
def get_priority(self): return self.priority
def set_priority(self, priority): self.priority = priority
def get_info(self): return self.info
def set_info(self, info): self.info = info
def hasContent_(self):
if (
self.firstname is not None or
self.lastname is not None or
self.priority is not None or
self.info is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='agent', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='agent')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='agent'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='agent', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.firstname is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfirstname>%s</%sfirstname>%s' % (namespace_, self.gds_format_string(quote_xml(self.firstname).encode(ExternalEncoding), input_name='firstname'), namespace_, eol_))
if self.lastname is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%slastname>%s</%slastname>%s' % (namespace_, self.gds_format_string(quote_xml(self.lastname).encode(ExternalEncoding), input_name='lastname'), namespace_, eol_))
if self.priority is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%spriority>%s</%spriority>%s' % (namespace_, self.gds_format_float(self.priority, input_name='priority'), namespace_, eol_))
if self.info is not None:
self.info.export(outfile, level, namespace_, name_='info', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='agent'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.firstname is not None:
showIndent(outfile, level)
outfile.write('firstname=%s,\n' % quote_python(self.firstname).encode(ExternalEncoding))
if self.lastname is not None:
showIndent(outfile, level)
outfile.write('lastname=%s,\n' % quote_python(self.lastname).encode(ExternalEncoding))
if self.priority is not None:
showIndent(outfile, level)
outfile.write('priority=%f,\n' % self.priority)
if self.info is not None:
showIndent(outfile, level)
outfile.write('info=model_.info(\n')
self.info.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'firstname':
firstname_ = child_.text
firstname_ = self.gds_validate_string(firstname_, node, 'firstname')
self.firstname = firstname_
elif nodeName_ == 'lastname':
lastname_ = child_.text
lastname_ = self.gds_validate_string(lastname_, node, 'lastname')
self.lastname = lastname_
elif nodeName_ == 'priority':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'priority')
self.priority = fval_
elif nodeName_ == 'info':
obj_ = info.factory()
obj_.build(child_)
self.info = obj_
def walk_and_update(self):
        members = agent.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if agent.superclass is not None:
agent.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: agent depth: %d' % (counter, depth, )
        members = agent.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class agent
class special_agent(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('firstname', 'xs:string', 0),
MemberSpec_('lastname', 'xs:string', 0),
MemberSpec_('priority', 'xs:float', 0),
MemberSpec_('info', 'info', 0),
]
subclass = None
superclass = None
def __init__(self, firstname=None, lastname=None, priority=None, info=None):
self.firstname = firstname
self.lastname = lastname
self.priority = priority
self.info = info
def factory(*args_, **kwargs_):
if special_agent.subclass:
return special_agent.subclass(*args_, **kwargs_)
else:
return special_agent(*args_, **kwargs_)
factory = staticmethod(factory)
def get_firstname(self): return self.firstname
def set_firstname(self, firstname): self.firstname = firstname
def get_lastname(self): return self.lastname
def set_lastname(self, lastname): self.lastname = lastname
def get_priority(self): return self.priority
def set_priority(self, priority): self.priority = priority
def get_info(self): return self.info
def set_info(self, info): self.info = info
def hasContent_(self):
if (
self.firstname is not None or
self.lastname is not None or
self.priority is not None or
self.info is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='special-agent', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='special-agent')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='special-agent'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='special-agent', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.firstname is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfirstname>%s</%sfirstname>%s' % (namespace_, self.gds_format_string(quote_xml(self.firstname).encode(ExternalEncoding), input_name='firstname'), namespace_, eol_))
if self.lastname is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%slastname>%s</%slastname>%s' % (namespace_, self.gds_format_string(quote_xml(self.lastname).encode(ExternalEncoding), input_name='lastname'), namespace_, eol_))
if self.priority is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%spriority>%s</%spriority>%s' % (namespace_, self.gds_format_float(self.priority, input_name='priority'), namespace_, eol_))
if self.info is not None:
self.info.export(outfile, level, namespace_, name_='info', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='special-agent'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.firstname is not None:
showIndent(outfile, level)
outfile.write('firstname=%s,\n' % quote_python(self.firstname).encode(ExternalEncoding))
if self.lastname is not None:
showIndent(outfile, level)
outfile.write('lastname=%s,\n' % quote_python(self.lastname).encode(ExternalEncoding))
if self.priority is not None:
showIndent(outfile, level)
outfile.write('priority=%f,\n' % self.priority)
if self.info is not None:
showIndent(outfile, level)
outfile.write('info=model_.info(\n')
self.info.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'firstname':
firstname_ = child_.text
firstname_ = self.gds_validate_string(firstname_, node, 'firstname')
self.firstname = firstname_
elif nodeName_ == 'lastname':
lastname_ = child_.text
lastname_ = self.gds_validate_string(lastname_, node, 'lastname')
self.lastname = lastname_
elif nodeName_ == 'priority':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'priority')
self.priority = fval_
elif nodeName_ == 'info':
obj_ = info.factory()
obj_.build(child_)
self.info = obj_
def walk_and_update(self):
        members = special_agent.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if special_agent.superclass is not None:
special_agent.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: special_agent depth: %d' % (counter, depth, )
        members = special_agent.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class special_agent
class booster(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('firstname', 'xs:string', 0),
MemberSpec_('lastname', 'xs:string', 0),
MemberSpec_('other_name', 'xs:float', 0),
MemberSpec_('class_', 'xs:float', 0),
MemberSpec_('other_value', 'xs:float', 1),
MemberSpec_('type_', 'xs:float', 1),
MemberSpec_('client_handler', 'client-handlerType', 1),
]
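    # Editorial note (illustrative): the third field of MemberSpec_ flags a
    # repeating element (1) versus a single occurrence (0). other_value,
    # type_ and client_handler are flagged 1 above, which is why __init__
    # below initialises them as lists and exportChildren iterates over them.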
subclass = None
superclass = None
def __init__(self, firstname=None, lastname=None, other_name=None, class_=None, other_value=None, type_=None, client_handler=None):
self.firstname = firstname
self.lastname = lastname
self.other_name = other_name
self.class_ = class_
if other_value is None:
self.other_value = []
else:
self.other_value = other_value
if type_ is None:
self.type_ = []
else:
self.type_ = type_
if client_handler is None:
self.client_handler = []
else:
self.client_handler = client_handler
def factory(*args_, **kwargs_):
if booster.subclass:
return booster.subclass(*args_, **kwargs_)
else:
return booster(*args_, **kwargs_)
factory = staticmethod(factory)
def get_firstname(self): return self.firstname
def set_firstname(self, firstname): self.firstname = firstname
def get_lastname(self): return self.lastname
def set_lastname(self, lastname): self.lastname = lastname
def get_other_name(self): return self.other_name
def set_other_name(self, other_name): self.other_name = other_name
def get_class(self): return self.class_
def set_class(self, class_): self.class_ = class_
def get_other_value(self): return self.other_value
def set_other_value(self, other_value): self.other_value = other_value
def add_other_value(self, value): self.other_value.append(value)
def insert_other_value(self, index, value): self.other_value[index] = value
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def add_type(self, value): self.type_.append(value)
def insert_type(self, index, value): self.type_[index] = value
def get_client_handler(self): return self.client_handler
def set_client_handler(self, client_handler): self.client_handler = client_handler
def add_client_handler(self, value): self.client_handler.append(value)
def insert_client_handler(self, index, value): self.client_handler[index] = value
def hasContent_(self):
if (
self.firstname is not None or
self.lastname is not None or
self.other_name is not None or
self.class_ is not None or
self.other_value or
self.type_ or
self.client_handler
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='booster', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='booster')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='booster'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='booster', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.firstname is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfirstname>%s</%sfirstname>%s' % (namespace_, self.gds_format_string(quote_xml(self.firstname).encode(ExternalEncoding), input_name='firstname'), namespace_, eol_))
if self.lastname is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%slastname>%s</%slastname>%s' % (namespace_, self.gds_format_string(quote_xml(self.lastname).encode(ExternalEncoding), input_name='lastname'), namespace_, eol_))
if self.other_name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sother-name>%s</%sother-name>%s' % (namespace_, self.gds_format_float(self.other_name, input_name='other-name'), namespace_, eol_))
if self.class_ is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sclass>%s</%sclass>%s' % (namespace_, self.gds_format_float(self.class_, input_name='class'), namespace_, eol_))
for other_value_ in self.other_value:
showIndent(outfile, level, pretty_print)
outfile.write('<%sother-value>%s</%sother-value>%s' % (namespace_, self.gds_format_float(other_value_, input_name='other-value'), namespace_, eol_))
for type_ in self.type_:
showIndent(outfile, level, pretty_print)
outfile.write('<%stype>%s</%stype>%s' % (namespace_, self.gds_format_float(type_, input_name='type'), namespace_, eol_))
for client_handler_ in self.client_handler:
client_handler_.export(outfile, level, namespace_, name_='client-handler', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='booster'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.firstname is not None:
showIndent(outfile, level)
outfile.write('firstname=%s,\n' % quote_python(self.firstname).encode(ExternalEncoding))
if self.lastname is not None:
showIndent(outfile, level)
outfile.write('lastname=%s,\n' % quote_python(self.lastname).encode(ExternalEncoding))
if self.other_name is not None:
showIndent(outfile, level)
outfile.write('other_name=%f,\n' % self.other_name)
if self.class_ is not None:
showIndent(outfile, level)
outfile.write('class_=%f,\n' % self.class_)
showIndent(outfile, level)
outfile.write('other_value=[\n')
level += 1
for other_value_ in self.other_value:
showIndent(outfile, level)
outfile.write('%f,\n' % other_value_)
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('type_=[\n')
level += 1
for type_ in self.type_:
showIndent(outfile, level)
outfile.write('%f,\n' % type_)
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('client_handler=[\n')
level += 1
for client_handler_ in self.client_handler:
showIndent(outfile, level)
outfile.write('model_.client_handlerType(\n')
client_handler_.exportLiteral(outfile, level, name_='client-handlerType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'firstname':
firstname_ = child_.text
firstname_ = self.gds_validate_string(firstname_, node, 'firstname')
self.firstname = firstname_
elif nodeName_ == 'lastname':
lastname_ = child_.text
lastname_ = self.gds_validate_string(lastname_, node, 'lastname')
self.lastname = lastname_
elif nodeName_ == 'other-name':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'other_name')
self.other_name = fval_
elif nodeName_ == 'class':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'class')
self.class_ = fval_
elif nodeName_ == 'other-value':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'other_value')
self.other_value.append(fval_)
elif nodeName_ == 'type':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'type')
self.type_.append(fval_)
elif nodeName_ == 'client-handler':
obj_ = client_handlerType.factory()
obj_.build(child_)
self.client_handler.append(obj_)
def walk_and_update(self):
        members = booster.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if booster.superclass is not None:
booster.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: booster depth: %d' % (counter, depth, )
        members = booster.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class booster
class info(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('rating', 'xs:float', 0),
MemberSpec_('type', 'xs:integer', 0),
MemberSpec_('name', 'xs:string', 0),
]
subclass = None
superclass = None
def __init__(self, rating=None, type_=None, name=None):
self.rating = _cast(float, rating)
self.type_ = _cast(int, type_)
self.name = _cast(None, name)
pass
def factory(*args_, **kwargs_):
if info.subclass:
return info.subclass(*args_, **kwargs_)
else:
return info(*args_, **kwargs_)
factory = staticmethod(factory)
def get_rating(self): return self.rating
def set_rating(self, rating): self.rating = rating
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_name(self): return self.name
def set_name(self, name): self.name = name
    def hasContent_(self):
        # info has attribute members only, so there is never element
        # content; the generated empty condition always evaluated to False.
        return False
def export(self, outfile, level, namespace_='', name_='info', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='info')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='info'):
if self.rating is not None and 'rating' not in already_processed:
already_processed.add('rating')
outfile.write(' rating="%s"' % self.gds_format_float(self.rating, input_name='rating'))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type="%s"' % self.gds_format_integer(self.type_, input_name='type'))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='', name_='info', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='info'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.rating is not None and 'rating' not in already_processed:
already_processed.add('rating')
showIndent(outfile, level)
outfile.write('rating=%f,\n' % (self.rating,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_=%d,\n' % (self.type_,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('rating', node)
if value is not None and 'rating' not in already_processed:
already_processed.add('rating')
try:
self.rating = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (rating): %s' % exp)
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
try:
self.type_ = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
def walk_and_update(self):
        members = info.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if info.superclass is not None:
info.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: info depth: %d' % (counter, depth, )
        members = info.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class info
class client_handlerType(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('fullname', 'xs:string', 0),
MemberSpec_('refid', 'xs:integer', 0),
]
subclass = None
superclass = None
def __init__(self, fullname=None, refid=None):
self.fullname = fullname
self.refid = refid
def factory(*args_, **kwargs_):
if client_handlerType.subclass:
return client_handlerType.subclass(*args_, **kwargs_)
else:
return client_handlerType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_fullname(self): return self.fullname
def set_fullname(self, fullname): self.fullname = fullname
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def hasContent_(self):
if (
self.fullname is not None or
self.refid is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='client-handlerType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='client-handlerType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='client-handlerType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='client-handlerType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.fullname is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfullname>%s</%sfullname>%s' % (namespace_, self.gds_format_string(quote_xml(self.fullname).encode(ExternalEncoding), input_name='fullname'), namespace_, eol_))
if self.refid is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%srefid>%s</%srefid>%s' % (namespace_, self.gds_format_integer(self.refid, input_name='refid'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='client-handlerType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.fullname is not None:
showIndent(outfile, level)
outfile.write('fullname=%s,\n' % quote_python(self.fullname).encode(ExternalEncoding))
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid=%d,\n' % self.refid)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'fullname':
fullname_ = child_.text
fullname_ = self.gds_validate_string(fullname_, node, 'fullname')
self.fullname = fullname_
elif nodeName_ == 'refid':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'refid')
self.refid = ival_
def walk_and_update(self):
        members = client_handlerType.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if client_handlerType.superclass is not None:
client_handlerType.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: client_handlerType depth: %d' % (counter, depth, )
        members = client_handlerType.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class client_handlerType
GDSClassesMapping = {
'client-handler': client_handlerType,
'elparam': param,
'promoter': booster,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
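# Illustrative invocations (assumption: the generated module is saved as
# out2_sup.py, matching the import written by parseLiteral below):
#
#   python out2_sup.py people.xml        # parse and re-emit the XML
#   python out2_sup.py -s people.xml     # parse silently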
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
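# Sketch (editorial, not generated code): how the lookup above resolves a
# root element. For a document rooted at <promoter>, get_root_tag returns
# ('promoter', booster) via GDSClassesMapping; an unmapped tag such as
# 'agent' falls back to the class of the same name found in globals().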
def parse(inFileName, silence=False):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'people'
rootClass = people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='',
pretty_print=True)
return rootObj
def parseEtree(inFileName, silence=False):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'people'
rootClass = people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
if not silence:
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
roots = get_root_tag(rootNode)
rootClass = roots[1]
if rootClass is None:
rootClass = people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_="people",
namespacedef_='')
return rootObj
def parseLiteral(inFileName, silence=False):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'people'
rootClass = people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('#from out2_sup import *\n\n')
sys.stdout.write('import out2_sup as model_\n\n')
        sys.stdout.write('rootObj = model_.%s(\n' % rootTag)
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
    args = sys.argv[1:]
    # Honour the -s (silence) flag advertised in USAGE_TEXT above.
    silence = False
    if args and args[0] == '-s':
        silence = True
        args = args[1:]
    if len(args) == 1:
        parse(args[0], silence=silence)
    else:
        usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"agent",
"booster",
"client_handlerType",
"comments",
"info",
"java_programmer",
"param",
"people",
"person",
"programmer",
"python_programmer",
"special_agent"
]<|fim▁end|>
|
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
|
<|file_name|>DialogueBoxSystem.cpp<|end_file_name|><|fim▁begin|>//
// DialogueBoxSystem.cpp
// Chilli Source
// Created by Ian Copland on 04/03/2014.
//
// The MIT License (MIT)
//
// Copyright (c) 2014 Tag Games Limited
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
#ifdef CS_TARGETPLATFORM_ANDROID
#include <CSBackend/Platform/Android/Main/JNI/Core/DialogueBox/DialogueBoxSystem.h>
#include <CSBackend/Platform/Android/Main/JNI/Core/DialogueBox/DialogueBoxJavaInterface.h>
#include <CSBackend/Platform/Android/Main/JNI/Core/JNI/JavaInterfaceManager.h>
#include <ChilliSource/Core/Base/Application.h>
#include <ChilliSource/Core/Base/PlatformSystem.h>
namespace CSBackend
{
namespace Android
{
CS_DEFINE_NAMEDTYPE(DialogueBoxSystem);
//----------------------------------------------------
//----------------------------------------------------
DialogueBoxSystem::DialogueBoxSystem()
{
m_dialogueBoxJI = JavaInterfaceManager::GetSingletonPtr()->GetJavaInterface<DialogueBoxJavaInterface>();
if (m_dialogueBoxJI == nullptr)
{
m_dialogueBoxJI = std::make_shared<DialogueBoxJavaInterface>();
JavaInterfaceManager::GetSingletonPtr()->AddJavaInterface(m_dialogueBoxJI);
}
}
//----------------------------------------------------
//----------------------------------------------------
bool DialogueBoxSystem::IsA(CSCore::InterfaceIDType in_interfaceId) const
{
return (DialogueBoxSystem::InterfaceID == in_interfaceId || CSCore::DialogueBoxSystem::InterfaceID == in_interfaceId);
}
//-----------------------------------------------------
//-----------------------------------------------------
void DialogueBoxSystem::ShowSystemDialogue(u32 in_id, const CSCore::DialogueBoxSystem::DialogueDelegate& in_delegate, const std::string& in_title, const std::string& in_message, const std::string& in_confirm)
{
m_dialogueBoxJI->ShowSystemDialogue(in_id, in_title, in_message, in_confirm);
m_activeSysConfirmDelegate = in_delegate;
}
//-----------------------------------------------------
//-----------------------------------------------------
void DialogueBoxSystem::ShowSystemConfirmDialogue(u32 in_id, const CSCore::DialogueBoxSystem::DialogueDelegate& in_delegate, const std::string& in_title, const std::string& in_message, const std::string& in_confirm, const std::string& in_cancel)
{
m_dialogueBoxJI->ShowSystemConfirmDialogue(in_id, in_title, in_message, in_confirm, in_cancel);
m_activeSysConfirmDelegate = in_delegate;
}
//-----------------------------------------------------
//-----------------------------------------------------
void DialogueBoxSystem::MakeToast(const std::string& in_text)
{
m_dialogueBoxJI->MakeToast(in_text);
}
//------------------------------------------------------
//------------------------------------------------------
void DialogueBoxSystem::OnSystemConfirmDialogueResult(u32 in_id, CSCore::DialogueBoxSystem::DialogueResult in_result)
{
if(m_activeSysConfirmDelegate)
{
m_activeSysConfirmDelegate(in_id, in_result);
m_activeSysConfirmDelegate = nullptr;
}
}
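        //-----------------------------------------------------
        // Illustrative caller-side sketch (editorial; not part of this
        // file, and the result handling shown is an assumption):
        // ShowSystemConfirmDialogue() stores the delegate, and the Java
        // backend later invokes OnSystemConfirmDialogueResult() above,
        // which fires the delegate once and then clears it.
        //
        //   dialogueSystem->ShowSystemConfirmDialogue(0,
        //       [](u32 in_id, CSCore::DialogueBoxSystem::DialogueResult in_result)
        //       {
        //           // inspect in_result to see whether the user confirmed
        //       },
        //       "Quit?", "Are you sure?", "OK", "Cancel");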
//-----------------------------------------------------
//-----------------------------------------------------
DialogueBoxSystem::~DialogueBoxSystem()
{
}
}<|fim▁hole|>}
#endif<|fim▁end|>
| |
<|file_name|>manager.py<|end_file_name|><|fim▁begin|># Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, and detaching persistent storage
volumes.
Persistent storage volumes keep their state independent of instances. You can
attach a volume to an instance, terminate the instance, spawn a new instance
(even one from a different image) and re-attach the volume with the same
data intact.
**Related Flags**
:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMISCSIDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from osprofiler import profiler
import six
from taskflow import exceptions as tfe
from cinder import compute
from cinder import context
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import glance
from cinder import manager
from cinder.openstack.common import periodic_task
from cinder import quota
from cinder import utils
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
from eventlet import greenpool
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = ('available', 'in-use',)
VALID_CREATE_CG_SRC_SNAP_STATUS = ('available',)
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMISCSIDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
default='none',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
]
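# Illustrative extra_capabilities value (editorial; the keys are invented
# for the example): a JSON object whose key/value pairs the
# CapabilitiesFilter can match against volume-type extra specs.
#
#   extra_capabilities = '{"service_level": "gold", "region": "eu-west"}'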
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
MAPPING = {
'cinder.volume.drivers.huawei.huawei_hvs.HuaweiHVSISCSIDriver':
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver',
'cinder.volume.drivers.huawei.huawei_hvs.HuaweiHVSFCDriver':
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_fc.FJDXFCDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver', }
def locked_volume_operation(f):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
Example use:
If a volume operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
volume e.g. delete VolA while create volume VolB from VolA is in progress.
"""
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1
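# Editorial sketch (hypothetical method; not part of this module): any
# manager method whose signature begins (self, context, volume_id) can be
# decorated. Concurrent calls of the same operation on the same volume are
# serialised; cross-operation coordination works by taking the same named
# lock explicitly, as create_volume does below via locked_action.
#
#   @locked_volume_operation
#   def example_volume_op(self, context, volume_id, **kwargs):
#       ...  # runs only while "<volume_id>-example_volume_op" is held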
def locked_detach_operation(f):
"""Lock decorator for volume detach operations.
Takes a named lock prior to executing the detach call. The lock is named
with the operation executed and the id of the volume. This lock can then
be used by other operations to avoid operation conflicts on shared volumes.
This locking mechanism is only for detach calls. We can't use the
locked_volume_operation, because detach requires an additional
attachment_id in the parameter list.
"""
def ldo_inner1(inst, context, volume_id, attachment_id=None, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def ldo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return ldo_inner2(inst, context, volume_id, attachment_id, **kwargs)
return ldo_inner1
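# Editorial sketch (hypothetical method): the detach variant exists because
# the wrapped callable also takes attachment_id, which lvo_inner1 above
# does not forward.
#
#   @locked_detach_operation
#   def example_detach(self, context, volume_id, attachment_id=None):
#       ...  # serialised under "<volume_id>-example_detach"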
def locked_snapshot_operation(f):
"""Lock decorator for snapshot operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the snapshot. This lock can then be
used by other operations to avoid operation conflicts on shared snapshots.
Example use:
If a snapshot operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
snapshot e.g. delete SnapA while create volume VolA from SnapA is in
progress.
"""
def lso_inner1(inst, context, snapshot, **kwargs):
@utils.synchronized("%s-%s" % (snapshot.id, f.__name__), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot, **kwargs)
return lso_inner1
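# Editorial sketch (hypothetical method): the snapshot variant receives a
# snapshot object and locks on snapshot.id rather than on a volume id.
#
#   @locked_snapshot_operation
#   def example_snapshot_op(self, context, snapshot, **kwargs):
#       ...  # serialised under "<snapshot.id>-example_snapshot_op"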
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '1.23'
target = messaging.Target(version=RPC_API_VERSION)
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
self._tp = greenpool.GreenPool()
self.stats = {}
if not volume_driver:
# Get from configuration, which will get the default
# if its not using the multi backend
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning(_LW("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s" % vol_db_empty)
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
is_vol_db_empty=vol_db_empty)
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Invalid JSON: %s" %
self.driver.configuration.extra_capabilities)
def _add_to_threadpool(self, func, *args, **kwargs):
self._tp.spawn_n(func, *args, **kwargs)
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
# No pool name encoded in host, so this is a legacy
# volume created before pool is introduced, ask
# driver to provide pool info if it has such
# knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception as err:
LOG.error(_LE('Failed to fetch pool name for volume: %s'),
volume['id'])
LOG.exception(err)
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
# Otherwise, put them into a special fixed pool with
# volume_backend_name being the pool name, if
# volume_backend_name is None, use default pool name.
# This is only for counting purpose, doesn't update DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def init_host(self):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)") %
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception as ex:
LOG.error(_LE("Error encountered during "
"initialization of driver: %(name)s") %
{'name': self.driver.__class__.__name__})
LOG.exception(ex)
# we don't want to continue since we failed
# to initialize the driver correctly.
return
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
# FIXME volume count for exporting is wrong
LOG.debug("Re-exporting %s volumes" % len(volumes))
try:
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
for volume in volumes:
# available volume should also be counted into allocated
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception as export_ex:
LOG.error(_LE("Failed to re-export volume %s: "
"setting to error state"), volume['id'])
LOG.exception(export_ex)
self.db.volume_update(ctxt,
volume['id'],
{'status': 'error'})
elif volume['status'] in ('downloading', 'creating'):
LOG.info(_LI("volume %(volume_id)s stuck in "
"%(volume_stat)s state. "
"Changing to error state."),
{'volume_id': volume['id'],
'volume_stat': volume['status']})
if volume['status'] == 'downloading':
self.driver.clear_download(ctxt, volume)
self.db.volume_update(ctxt,
volume['id'],
{'status': 'error'})
else:
LOG.info(_LI("volume %s: skipping export"), volume['id'])
snapshots = self.db.snapshot_get_by_host(ctxt,
self.host,
{'status': 'creating'})
for snapshot in snapshots:
LOG.info(_LI("snapshot %(snap_id)s stuck in "
"%(snap_stat)s state. "
"Changing to error state."),
{'snap_id': snapshot['id'],
'snap_stat': snapshot['status']})
self.db.snapshot_update(ctxt,
snapshot['id'],
{'status': 'error'})
except Exception as ex:
LOG.error(_LE("Error encountered during "
"re-exporting phase of driver initialization: "
" %(name)s") %
{'name': self.driver.__class__.__name__})
LOG.exception(ex)
return
self.driver.set_throttle()
# at this point the driver is considered initialized.
self.driver.set_initialized()
LOG.debug('Resuming any in progress delete operations')
for volume in volumes:
if volume['status'] == 'deleting':
LOG.info(_LI('Resuming delete on volume: %s') % volume['id'])
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
volume['id'])
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, volume['id'])
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
# conditionally run replication status task
stats = self.driver.get_volume_stats(refresh=True)
if stats and stats.get('replication', False):
@periodic_task.periodic_task
def run_replication_task(self, ctxt):
self._update_replication_relationship_status(ctxt)
self.add_periodic_task(run_replication_task)
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
snapshot_id=None, image_id=None, source_volid=None,
source_replicaid=None, consistencygroup_id=None,
cgsnapshot_id=None):
"""Creates the volume."""
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume_id,
allow_reschedule,
context,
request_spec,
filter_properties,
snapshot_id=snapshot_id,
image_id=image_id,
source_volid=source_volid,
source_replicaid=source_replicaid,
consistencygroup_id=consistencygroup_id,
cgsnapshot_id=cgsnapshot_id)
except Exception:
LOG.exception(_LE("Failed to create manager volume flow"))
raise exception.CinderException(
_("Failed to create manager volume flow."))
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None
def _run_flow():
            # This code executes the create volume flow. If something goes
            # wrong, the flow reverts all of the work that was done and
            # reraises the exception. Otherwise, all data generated by the
            # flow becomes available in the flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
@utils.synchronized(locked_action, external=True)
def _run_flow_locked():
_run_flow()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
try:
if locked_action is None:
_run_flow()
else:
_run_flow_locked()
except Exception as e:
if hasattr(e, 'rescheduled'):
rescheduled = e.rescheduled
raise
finally:
try:
vol_ref = flow_engine.storage.fetch('volume_ref')
except tfe.NotFound as e:
# Flow was reverted, fetching volume_ref from the DB.
vol_ref = self.db.volume_get(context, volume_id)
if not rescheduled:
# NOTE(dulek): Volume wasn't rescheduled so we need to update
# volume stats as these are decremented on delete.
self._update_allocated_capacity(vol_ref)
return vol_ref['id']
@locked_volume_operation
def delete_volume(self, context, volume_id, unmanage_only=False):
"""Deletes and unexports volume.
1. Delete a volume(normal case)
Delete a volume and update quotas.
2. Delete a migration source volume
If deleting the source volume in a migration, we want to skip
           quotas. We also want to skip other database updates for the
           source volume because these updates will be handled properly
           in migrate_volume_completion.
3. Delete a migration destination volume
If deleting the destination volume in a migration, we want to
skip quotas but we need database updates for the volume.
"""
context = context.elevated()
try:
volume_ref = self.db.volume_get(context, volume_id)
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.info(_LI("Tried to delete volume %s, but it no longer exists, "
"moving on") % (volume_id))
return True
if context.project_id != volume_ref['project_id']:
project_id = volume_ref['project_id']
else:
project_id = context.project_id
LOG.info(_LI("volume %s: deleting"), volume_ref['id'])
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if (vol_utils.extract_host(volume_ref['host']) != self.host):
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
is_migrating = volume_ref['migration_status'] is not None
is_migrating_dest = (is_migrating and
volume_ref['migration_status'].startswith(
'target:'))
self._notify_about_volume_usage(context, volume_ref, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug("volume %s: removing export", volume_ref['id'])
self.driver.remove_export(context, volume_ref)
LOG.debug("volume %s: deleting", volume_ref['id'])
if unmanage_only:
self.driver.unmanage(volume_ref)
else:
self.driver.delete_volume(volume_ref)
except exception.VolumeIsBusy:
LOG.error(_LE("Cannot delete volume %s: volume is busy"),
volume_ref['id'])
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume_ref,
'available')
return True
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume_ref,
'error_deleting')
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# Get reservations
try:
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting volume"))
        # If deleting the source volume in a migration, we should skip the
        # database update here. In other cases, continue to update database
        # entries.
if not is_migrating or is_migrating_dest:
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
LOG.info(_LI("volume %s: deleted successfully"), volume_ref['id'])
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
self._notify_about_volume_usage(context, volume_ref, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
pool = vol_utils.extract_host(volume_ref['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume_ref['host'], 'pool', True)
size = volume_ref['size']
try:
self.stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=-size)
self.publish_service_capabilities(context)
return True
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
self.db.volume_destroy(context, volume_ref['id'])
LOG.error(_LE("Unable to delete the destination volume %s "
"during volume migration, but the database "
"record needs to be deleted."),
volume_ref['id'])
else:
self.db.volume_update(context,
volume_ref['id'],
{'status': status})
def create_snapshot(self, context, volume_id, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
LOG.info(_LI("snapshot %s: creating"), snapshot.id)
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
LOG.debug("snapshot %(snap_id)s: creating",
{'snap_id': snapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save(context)
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = 'error'
snapshot.save(context)
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, volume_id)
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata") %
{'volume_id': volume_id,
'snapshot_id': snapshot.id})
snapshot.status = 'error'
snapshot.save(context)
raise exception.MetadataCopyFailure(reason=ex)
snapshot.status = 'available'
snapshot.progress = '100%'
snapshot.save(context)
LOG.info(_("snapshot %s: created successfully"), snapshot.id)
self._notify_about_snapshot_usage(context, snapshot, "create.end")
return snapshot.id
@locked_snapshot_operation
def delete_snapshot(self, context, snapshot):
"""Deletes and unexports snapshot."""
context = context.elevated()
project_id = snapshot.project_id
LOG.info(_("snapshot %s: deleting"), snapshot.id)
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
LOG.debug("snapshot %s: deleting", snapshot.id)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy:
LOG.error(_LE("Cannot delete snapshot %s: snapshot is busy"),
snapshot.id)
snapshot.status = 'available'
snapshot.save()
return True
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = 'error_deleting'
snapshot.save()
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy(context)
LOG.info(_LI("snapshot %s: deleted successfully"), snapshot.id)
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
return True
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached."""
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
volume_metadata = self.db.volume_admin_metadata_get(
context.elevated(), volume_id)
if volume['status'] == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
msg = _("being attached by different mode")
raise exception.InvalidVolume(reason=msg)
if (volume['status'] == 'in-use' and not volume['multiattach']
and not volume['migration_status']):
msg = _("volume is already attached")
raise exception.InvalidVolume(reason=msg)
attachment = None
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachment = \
self.db.volume_attachment_get_by_instance_uuid(
context, volume_id, instance_uuid)
else:
attachment = \
self.db.volume_attachment_get_by_host(context, volume_id,
host_name_sanitized)
if attachment is not None:
return
self._notify_about_volume_usage(context, volume,
"attach.start")
values = {'volume_id': volume_id,
'attach_status': 'attaching', }
attachment = self.db.volume_attach(context.elevated(), values)
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(), volume_id,
{"attached_mode": mode}, False)
attachment_id = attachment['id']
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_attachment_update(context, attachment_id,
{'attach_status':
'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
volume = self.db.volume_get(context, volume_id)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_id,
{'attach_status': 'error_attaching'})
volume = self.db.volume_attached(context.elevated(),
attachment_id,
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
if volume['migration_status']:
self.db.volume_update(context, volume_id,
{'migration_status': None})
self._notify_about_volume_usage(context, volume, "attach.end")
return self.db.volume_attachment_get(context, attachment_id)
return do_attach()
@locked_detach_operation
def detach_volume(self, context, volume_id, attachment_id=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
attachment = None
if attachment_id:
try:
attachment = self.db.volume_attachment_get(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.error(_LE("We couldn't find the volume attachment"
" for volume %(volume_id)s and"
" attachment id %(id)s"),
{"volume_id": volume_id,
"id": attachment_id})
raise
else:
            # We can try to degrade gracefully here by detaching a volume
            # without the attachment_id if the volume only has one
            # attachment. This is for backwards compatibility.
attachments = self.db.volume_attachment_get_used_by_volume_id(
context, volume_id)
if len(attachments) > 1:
# There are more than 1 attachments for this volume
# we have to have an attachment id.
msg = _("Volume %(id)s is attached to more than one instance"
". A valid attachment_id must be passed to detach"
" this volume") % {'id': volume_id}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
# there aren't any attachments for this volume.
msg = _("Volume %(id)s doesn't have any attachments "
"to detach") % {'id': volume_id}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume = self.db.volume_get(context, volume_id)
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
self.db.volume_detached(context.elevated(), volume_id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
'attached_mode')
# NOTE(jdg): We used to do an ensure export here to
# catch upgrades while volumes were attached (E->F)
# this was necessary to convert in-use volumes from
# int ID's to UUID's. Don't need this any longer
# We're going to remove the export here
# (delete the iscsi target)
volume = self.db.volume_get(context, volume_id)
try:
utils.require_driver_initialized(self.driver)
LOG.debug("volume %s: removing export", volume_id)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Error detaching volume %(volume)s, "
"due to uninitialized driver."),
{"volume": volume_id})
except Exception as ex:
LOG.exception(_LE("Error detaching volume %(volume)s, "
"due to remove export failure."),
{"volume": volume_id})
raise exception.RemoveExportException(volume=volume_id, reason=ex)
self._notify_about_volume_usage(context, volume, "detach.end")
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = self.db.volume_get(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \<|fim▁hole|> image_meta)
LOG.debug("Uploaded volume %(volume_id)s to "
"image (%(image_id)s) successfully",
{'volume_id': volume_id, 'image_id': image_id})
except Exception as error:
LOG.error(_LE("Error occurred while uploading "
"volume %(volume_id)s "
"to image %(image_id)s."),
{'volume_id': volume_id, 'image_id': image_meta['id']})
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
finally:
if not volume['volume_attachment']:
self.db.volume_update(context, volume_id,
{'status': 'available'})
else:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warn(_LW("Deleting image %(image_id)s in %(image_status)s "
"state."),
{'image_id': image_id,
'image_status': image_status})
image_service.delete(context, image_id)
except Exception:
LOG.warn(_LW("Error occurred while deleting image %s."),
image_id, exc_info=True)
def _driver_data_namespace(self):
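        # Fall back through: configured namespace -> backend name ->
        # driver class name, so initiator data stays keyed consistently
        # per backend.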
return self.driver.configuration.safe_get('driver_data_namespace') \
or self.driver.configuration.safe_get('volume_backend_name') \
or self.driver.__class__.__name__
def _get_driver_initiator_data(self, context, connector):
data = None
initiator = connector.get('initiator', False)
if initiator:
namespace = self._driver_data_namespace()
try:
data = self.db.driver_initiator_data_get(
context,
initiator,
namespace
)
except exception.CinderException:
LOG.exception(_LE("Failed to get driver initiator data for"
" initiator %(initiator)s and namespace"
" %(namespace)s"),
{'initiator': initiator,
'namespace': namespace})
raise
return data
def _save_driver_initiator_data(self, context, connector, model_update):
if connector.get('initiator', False) and model_update:
namespace = self._driver_data_namespace()
try:
self.db.driver_initiator_data_update(context,
connector['initiator'],
namespace,
model_update)
except exception.CinderException:
LOG.exception(_LE("Failed to update initiator data for"
" initiator %(initiator)s and backend"
" %(backend)s"),
{'initiator': connector['initiator'],
'backend': namespace})
raise
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=err)
except Exception as err:
err_msg = (_('Unable to validate connector information in '
'backend: %(err)s') % {'err': err})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
volume = self.db.volume_get(context, volume_id)
model_update = None
try:
LOG.debug("Volume %s: creating export", volume_id)
model_update = self.driver.create_export(context.elevated(),
volume)
except exception.CinderException:
err_msg = (_('Unable to create export for volume %(volume_id)s') %
{'volume_id': volume_id})
LOG.exception(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume = self.db.volume_update(context,
volume_id,
model_update)
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating model of volume %(volume_id)s"
" with driver provided model %(model)s") %
{'volume_id': volume_id, 'model': model_update})
raise exception.ExportFailure(reason=ex)
initiator_data = self._get_driver_initiator_data(context, connector)
try:
if initiator_data:
conn_info = self.driver.initialize_connection(volume,
connector,
initiator_data)
else:
conn_info = self.driver.initialize_connection(volume,
connector)
except Exception as err:
err_msg = (_('Unable to fetch connection information from '
'backend: %(err)s') % {'err': err})
LOG.error(err_msg)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
initiator_update = conn_info.get('initiator_update', None)
if initiator_update:
self._save_driver_initiator_data(context, connector,
initiator_update)
del conn_info['initiator_update']
# Add qos_specs to connection info
typeid = volume['volume_type_id']
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# only pass qos_specs that is designated to be consumed by
# front-end, or both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
volume_id)
if conn_info['data'].get('access_mode') is None:
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Unable to terminate volume connection: %(err)s')
% {'err': err})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
# NOTE(jdg): Some drivers tie provider info (CHAP) to tenant
# for those that do allow them to return updated model info
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed updating model of "
"volume %(volume_id)s "
"with drivers update %(model)s "
"during xfr.") %
{'volume_id': volume_id,
'model': model_update})
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
return model_update
def _migrate_volume_generic(self, ctxt, volume, host, new_type_id):
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
new_vol_values = {}
for k, v in volume.iteritems():
new_vol_values[k] = v
del new_vol_values['id']
del new_vol_values['_name_id']
# We don't copy volume_type because the db sets that according to
# volume_type_id, which we do copy
del new_vol_values['volume_type']
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
new_vol_values['host'] = host['host']
new_vol_values['status'] = 'creating'
        # FIXME(jdg): using a : delimiter is confusing to
# me below here. We're adding a string member to a dict
# using a :, which is kind of a poor choice in this case
# I think
new_vol_values['migration_status'] = 'target:%s' % volume['id']
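        # delete_volume() later recognizes this marker via
        # migration_status.startswith('target:') to tell the destination
        # side of a migration apart from a user-requested delete.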
new_vol_values['attach_status'] = 'detached'
new_vol_values['volume_attachment'] = []
new_volume = self.db.volume_create(ctxt, new_vol_values)
rpcapi.create_volume(ctxt, new_volume, host['host'],
None, None, allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
new_volume = self.db.volume_get(ctxt, new_volume['id'])
tries = 0
while new_volume['status'] != 'available':
tries += 1
now = time.time()
if new_volume['status'] == 'error':
msg = _("failed to create new_volume on destination host")
self._clean_temporary_volume(ctxt, volume['id'],
new_volume['id'],
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination host")
self._clean_temporary_volume(ctxt, volume['id'],
new_volume['id'],
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
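                # Quadratic back-off between polls (1s, 4s, 9s, ...),
                # bounded overall by the deadline computed above.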
time.sleep(tries ** 2)
new_volume = self.db.volume_get(ctxt, new_volume['id'])
# Copy the source volume to the destination volume
try:
attachments = volume['volume_attachment']
if not attachments:
self.driver.copy_volume_data(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume['id'],
new_volume['id'],
error=False)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume['id'],
new_volume['id'])
except Exception:
with excutils.save_and_reraise_exception():
msg = _LE("Failed to copy volume %(vol1)s to %(vol2)s")
LOG.error(msg, {'vol1': volume['id'],
'vol2': new_volume['id']})
self._clean_temporary_volume(ctxt, volume['id'],
new_volume['id'])
def _get_original_status(self, volume):
attachments = volume['volume_attachment']
if not attachments:
return 'available'
else:
return 'in-use'
def _clean_temporary_volume(self, ctxt, volume_id, new_volume_id,
clean_db_only=False):
volume = self.db.volume_get(ctxt, volume_id)
# If we're in the migrating phase, we need to cleanup
# destination volume because source volume is remaining
if volume['migration_status'] == 'migrating':
try:
if clean_db_only:
# The temporary volume is not created, only DB data
# is created
self.db.volume_destroy(ctxt, new_volume_id)
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
volume = self.db.volume_get(ctxt, new_volume_id)
rpcapi.delete_volume(ctxt, volume)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume."),
{'vol': new_volume_id})
else:
# If we're in the completing phase don't delete the
# destination because we may have already deleted the
# source! But the migration_status in database should
# be cleared to handle volume after migration failure
try:
updates = {'migration_status': None}
self.db.volume_update(ctxt, new_volume_id, updates)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase."),
{'vol': new_volume_id})
LOG.warning(_LW("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted."),
{'vol': new_volume_id})
def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
error=False):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'error'})
msg = _("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s")
LOG.debug(msg % {'vol1': volume_id, 'vol2': new_volume_id})
volume = self.db.volume_get(ctxt, volume_id)
new_volume = self.db.volume_get(ctxt, new_volume_id)
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = self._get_original_status(volume)
if error:
msg = _("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s")
LOG.info(msg % {'vol1': volume['id'],
'vol2': new_volume['id']})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': None, 'status': orig_volume_status}
self.db.volume_update(ctxt, volume_id, updates)
return volume_id
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'completing'})
# Delete the source volume (if it fails, don't fail the migration)
try:
if orig_volume_status == 'in-use':
attachments = volume['volume_attachment']
for attachment in attachments:
self.detach_volume(ctxt, volume_id, attachment['id'])
self.delete_volume(ctxt, volume_id)
except Exception as ex:
msg = _("Failed to delete migration source vol %(vol)s: %(err)s")
LOG.error(msg % {'vol': volume_id, 'err': ex})
# Give driver (new_volume) a chance to update things as needed
# Note this needs to go through rpc to the host of the new volume
# the current host and driver object is for the "existing" volume
rpcapi.update_migrated_volume(ctxt,
volume,
new_volume)
self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
self.db.volume_destroy(ctxt, new_volume_id)
if orig_volume_status == 'in-use':
updates = {'migration_status': 'completing',
'status': orig_volume_status}
else:
updates = {'migration_status': None}
self.db.volume_update(ctxt, volume_id, updates)
if orig_volume_status == 'in-use':
attachments = volume['volume_attachment']
for attachment in attachments:
rpcapi.attach_volume(ctxt, volume,
attachment['instance_uuid'],
attachment['attached_host'],
attachment['mountpoint'],
'rw')
return volume['id']
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
new_type_id=None):
"""Migrate the volume to the specified host (called on source host)."""
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(ctxt, volume_id,
{'migration_status': 'error'})
volume_ref = self.db.volume_get(ctxt, volume_id)
model_update = None
moved = False
status_update = None
if volume_ref['status'] == 'retyping':
status_update = {'status': self._get_original_status(volume_ref)}
self.db.volume_update(ctxt, volume_ref['id'],
{'migration_status': 'migrating'})
if not force_host_copy and new_type_id is None:
try:
LOG.debug("volume %s: calling driver migrate_volume",
volume_ref['id'])
moved, model_update = self.driver.migrate_volume(ctxt,
volume_ref,
host)
if moved:
updates = {'host': host['host'],
'migration_status': None}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume_ref = self.db.volume_update(ctxt,
volume_ref['id'],
updates)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': None}
if status_update:
updates.update(status_update)
try:
model_update = self.driver.create_export(ctxt,
volume_ref)
if model_update:
updates.update(model_update)
except Exception:
LOG.exception(_LE("Failed to create export for "
"volume: %s"), volume_ref['id'])
finally:
self.db.volume_update(ctxt, volume_ref['id'], updates)
if not moved:
try:
self._migrate_volume_generic(ctxt, volume_ref, host,
new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': None}
if status_update:
updates.update(status_update)
try:
model_update = self.driver.create_export(ctxt,
volume_ref)
if model_update:
updates.update(model_update)
except Exception:
LOG.exception(_LE("Failed to create export for "
"volume: %s"), volume_ref['id'])
finally:
self.db.volume_update(ctxt, volume_ref['id'], updates)
@periodic_task.periodic_task
def _report_driver_status(self, context):
LOG.info(_LI("Updating volume status"))
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW('Unable to update stats, %(driver_name)s '
'-%(driver_version)s '
'%(config_group)s driver is uninitialized.') %
{'driver_name': self.driver.__class__.__name__,
'driver_version': self.driver.get_version(),
'config_group': config_group})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def notification(self, context, event):
LOG.info(_LI("Notification {%s} received"), event)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group['id'])
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = self.db.snapshot_get_all_for_cgsnapshot(
context, cgsnapshot['id'])
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume_id, new_size, reservations):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
self.db.volume_update(context, volume_id,
{'status': 'error_extending'})
volume = self.db.volume_get(context, volume_id)
        size_increase = int(new_size) - volume['size']
self._notify_about_volume_usage(context, volume, "resize.start")
try:
LOG.info(_LI("volume %s: extending"), volume['id'])
self.driver.extend_volume(volume, new_size)
LOG.info(_LI("volume %s: extended successfully"), volume['id'])
except Exception:
LOG.exception(_LE("volume %s: Error trying to extend volume"),
volume_id)
try:
self.db.volume_update(context, volume['id'],
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume_id)
finally:
QUOTAS.rollback(context, reservations)
return
QUOTAS.commit(context, reservations)
volume = self.db.volume_update(context,
volume['id'],
{'size': int(new_size),
'status': 'available'})
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
def retype(self, ctxt, volume_id, new_type_id, host,
migration_policy='never', reservations=None):
def _retype_error(context, volume_id, old_reservations,
new_reservations, status_update):
try:
self.db.volume_update(context, volume_id, status_update)
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
context = ctxt.elevated()
volume_ref = self.db.volume_get(ctxt, volume_id)
status_update = {'status': self._get_original_status(volume_ref)}
if context.project_id != volume_ref['project_id']:
project_id = volume_ref['project_id']
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
# here? Setting the volume back to it's original status
# for now.
self.db.volume_update(context, volume_id, status_update)
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
self.db.volume_update(context, volume_id, status_update)
LOG.exception(_LE("Failed to update usages "
"while retyping volume."))
raise exception.CinderException(_("Failed to get old volume type"
" quota reservations"))
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context, volume_ref.get('volume_type_id'), new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
if not retyped:
try:
new_type = volume_types.get_volume_type(context, new_type_id)
ret = self.driver.retype(context,
volume_ref,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
                if isinstance(ret, tuple):
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume_id)
except Exception as ex:
retyped = False
LOG.error(_LE("Volume %s: driver error when trying to retype, "
"falling back to generic mechanism."),
volume_ref['id'])
LOG.exception(ex)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = self.db.snapshot_get_all_for_volume(context,
volume_ref['id'])
if snaps:
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume_ref['replication_status']
if rep_status is not None and rep_status != 'disabled':
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self.db.volume_update(context, volume_ref['id'],
{'migration_status': 'starting'})
try:
self.migrate_volume(context, volume_id, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume_id, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
self.db.volume_update(context, volume_id, model_update)
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self.publish_service_capabilities(context)
def manage_existing(self, ctxt, volume_id, ref=None):
        LOG.debug('manage_existing: managing %s.', ref)
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume_id,
ref)
except Exception:
LOG.exception(_LE("Failed to create manage_existing flow."))
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
# Update volume stats
pool = vol_utils.extract_host(vol_ref['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol_ref['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= vol_ref['size']
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol_ref['size'])
return vol_ref['id']
def promote_replica(self, ctxt, volume_id):
"""Promote volume replica secondary to be the primary volume."""
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to promote replica "
"for volume %(id)s.")
% {'id': volume_id})
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
LOG.debug("Volume %s: promote replica.", volume_id)
model_update = self.driver.promote_replica(ctxt, volume)
except exception.CinderException:
err_msg = (_('Error promoting secondary volume to primary'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
def reenable_replication(self, ctxt, volume_id):
"""Re-enable replication of secondary volume with primary volumes."""
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to sync replica for volume %(id)s.")
% {'id': volume_id})
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
LOG.debug("Volume %s: sync replica.", volume_id)
model_update = self.driver.reenable_replication(ctxt, volume)
except exception.CinderException:
err_msg = (_('Error synchronizing secondary volume to primary'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
def _update_replication_relationship_status(self, ctxt):
LOG.info(_LI('Updating volume replication status.'))
# Only want volumes that do not have a 'disabled' replication status
filters = {'replication_status': ['active', 'copying', 'error',
'active-stopped', 'inactive']}
volumes = self.db.volume_get_all_by_host(ctxt, self.host,
filters=filters)
for vol in volumes:
model_update = None
try:
model_update = self.driver.get_replication_status(
ctxt, vol)
if model_update:
self.db.volume_update(ctxt, vol['id'], model_update)
except Exception:
LOG.exception(_LE("Error checking replication status for "
"volume %s") % vol['id'])
def create_consistencygroup(self, context, group_id):
"""Creates the consistency group."""
context = context.elevated()
group_ref = self.db.consistencygroup_get(context, group_id)
group_ref['host'] = self.host
status = 'available'
model_update = False
self._notify_about_consistencygroup_usage(
context, group_ref, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Consistency group %s: creating"), group_ref['name'])
model_update = self.driver.create_consistencygroup(context,
group_ref)
if model_update:
group_ref = self.db.consistencygroup_update(
context, group_ref['id'], model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.consistencygroup_update(
context,
group_ref['id'],
{'status': 'error'})
LOG.error(_LE("Consistency group %s: create failed"),
group_ref['name'])
now = timeutils.utcnow()
self.db.consistencygroup_update(context,
group_ref['id'],
{'status': status,
'created_at': now})
LOG.info(_LI("Consistency group %s: created successfully"),
group_ref['name'])
self._notify_about_consistencygroup_usage(
context, group_ref, "create.end")
return group_ref['id']
def create_consistencygroup_from_src(self, context, group_id,
cgsnapshot_id=None):
"""Creates the consistency group from source.
Currently the source can only be a cgsnapshot.
"""
group_ref = self.db.consistencygroup_get(context, group_id)
try:
volumes = self.db.volume_get_all_by_group(
context, group_id)
cgsnapshot = None
snapshots = None
if cgsnapshot_id:
try:
cgsnapshot = self.db.cgsnapshot_get(context, cgsnapshot_id)
except exception.CgSnapshotNotFound:
LOG.error(_LE("Cannot create consistency group %(group)s "
"because cgsnapshot %(snap)s cannot be "
"found."),
{'group': group_id,
'snap': cgsnapshot_id})
raise
if cgsnapshot:
snapshots = self.db.snapshot_get_all_for_cgsnapshot(
context, cgsnapshot_id)
for snap in snapshots:
if (snap['status'] not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group_id,
'snap': snap['id'],
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
self._notify_about_consistencygroup_usage(
context, group_ref, "create.start")
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Consistency group %(group)s: creating from source "
"cgsnapshot %(snap)s."),
{'group': group_id,
'snap': cgsnapshot_id})
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, group_ref, volumes, cgsnapshot,
sorted_snapshots))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group_ref = self.db.consistencygroup_update(
context, group_id, model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.consistencygroup_update(
context,
group_id,
{'status': 'error'})
LOG.error(_LE("Consistency group %(group)s: create from "
"source cgsnapshot %(snap)s failed."),
{'group': group_id,
'snap': cgsnapshot_id})
# Update volume status to 'error' as well.
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update,
group_id=group_id)
self._update_allocated_capacity(vol)
self.db.consistencygroup_update(context,
group_id,
{'status': status,
'created_at': now})
LOG.info(_LI("Consistency group %(group)s: created successfully "
"from source cgsnapshot %(snap)s."),
{'group': group_id,
'snap': cgsnapshot_id})
self._notify_about_consistencygroup_usage(
context, group_ref, "create.end")
return group_ref['id']
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
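        # e.g. (hypothetical ids) volumes with snapshot_id [s2, s1] and
        # snapshots [s1, s2] yield sorted_snapshots [s2, s1].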
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
            found_snaps = [snap for snap in snapshots
                           if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _update_volume_from_src(self, context, vol, update, group_id=None):
try:
snapshot_ref = self.db.snapshot_get(context,
vol['snapshot_id'])
orig_vref = self.db.volume_get(context,
snapshot_ref['volume_id'])
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], vol['snapshot_id'])
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group_id:
self.db.consistencygroup_update(
context, group_id, {'status': 'error'})
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot_ref['volume_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group_id:
self.db.consistencygroup_update(
context, group_id, {'status': 'error'})
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata.") %
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group_id:
self.db.consistencygroup_update(
context, group_id, {'status': 'error'})
raise exception.MetadataCopyFailure(reason=ex)
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
def delete_consistencygroup(self, context, group_id):
"""Deletes consistency group and the volumes in the group."""
context = context.elevated()
group_ref = self.db.consistencygroup_get(context, group_id)
        if context.project_id != group_ref['project_id']:
            project_id = group_ref['project_id']
        else:
            project_id = context.project_id
LOG.info(_LI("Consistency group %s: deleting"), group_ref['id'])
volumes = self.db.volume_get_all_by_group(context, group_id)
for volume_ref in volumes:
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_ref['id'])
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(volume_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node"))
self._notify_about_consistencygroup_usage(
context, group_ref, "delete.start")
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Consistency group %(group_id)s: deleting",
{'group_id': group_id})
model_update, volumes = self.driver.delete_consistencygroup(
context, group_ref)
if volumes:
for volume in volumes:
update = {'status': volume['status']}
self.db.volume_update(context, volume['id'],
update)
# If we failed to delete a volume, make sure the status
# for the cg is set to error as well
if (volume['status'] in ['error_deleting', 'error'] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = volume['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting consistency group '
'%s.') % group_ref['id'])
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
else:
self.db.consistencygroup_update(context, group_ref['id'],
model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.consistencygroup_update(
context,
group_ref['id'],
{'status': 'error_deleting'})
# Get reservations for group
try:
reserve_opts = {'consistencygroups': -1}
cgreservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
cgreservations = None
LOG.exception(_LE("Failed to update usages deleting "
"consistency groups."))
for volume_ref in volumes:
# Get reservations for volume
try:
volume_id = volume_ref['id']
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting volume."))
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume_ref['size']
if cgreservations:
CGQUOTAS.commit(context, cgreservations,
project_id=project_id)
self.db.consistencygroup_destroy(context, group_id)
LOG.info(_LI("Consistency group %s: deleted successfully."),
group_id)
self._notify_about_consistencygroup_usage(
context, group_ref, "delete.end", volumes)
self.publish_service_capabilities(context)
return True
def update_consistencygroup(self, context, group_id,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
"""
LOG.info(_LI("Consistency group %s: updating"), group_id)
group = self.db.consistencygroup_get(context, group_id)
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = self.db.volume_get(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume cannot be "
"found."),
{'volume_id': add_vol_ref['id'],
'group_id': group_id})
raise
if add_vol_ref['status'] not in ['in-use', 'available']:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group_id,
'status': add_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(add_vol_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Cannot remove volume %(volume_id)s from "
"consistency group %(group_id)s because volume "
"cannot be found."),
{'volume_id': remove_vol_ref['id'],
'group_id': group_id})
raise
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Consistency group %(group_id)s: updating",
{'group_id': group['id']})
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if model_update['status'] in ['error']:
msg = (_('Error occurred when updating consistency group '
'%s.') % group_id)
LOG.exception(msg)
raise exception.VolumeDriverException(message=msg)
self.db.consistencygroup_update(context, group_id,
model_update)
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group_id})
self.db.consistencygroup_update(context, group_id,
{'status': 'error'})
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group['id']})
self.db.consistencygroup_update(context, group_id,
{'status': 'error'})
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
self.db.consistencygroup_update(context, group_id,
{'status': 'available',
'updated_at': now})
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group_id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
LOG.info(_LI("Consistency group %s: updated successfully."),
group_id)
self._notify_about_consistencygroup_usage(
context, group, "update.end")
return True
def create_cgsnapshot(self, context, group_id, cgsnapshot_id):
"""Creates the cgsnapshot."""
caller_context = context
context = context.elevated()
cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot_ref['id'])
snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
cgsnapshot_id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
{'cgsnap_id': cgsnapshot_id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot_ref['context'] = caller_context
for snapshot in snapshots:
snapshot['context'] = caller_context
model_update, snapshots = \
self.driver.create_cgsnapshot(context, cgsnapshot_ref)
if snapshots:
for snapshot in snapshots:
# Update db if status is error
if snapshot['status'] == 'error':
update = {'status': snapshot['status']}
self.db.snapshot_update(context, snapshot['id'],
update)
# If status for one snapshot is error, make sure
# the status for the cgsnapshot is also error
if model_update['status'] != 'error':
model_update['status'] = snapshot['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating cgsnapshot '
'%s.') % cgsnapshot_ref['id'])
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
except Exception:
with excutils.save_and_reraise_exception():
self.db.cgsnapshot_update(context,
cgsnapshot_ref['id'],
{'status': 'error'})
for snapshot in snapshots:
volume_id = snapshot['volume_id']
snapshot_id = snapshot['id']
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot['id'], volume_id)
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata") %
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
self.db.snapshot_update(context,
snapshot['id'],
{'status': 'error'})
raise exception.MetadataCopyFailure(reason=ex)
self.db.snapshot_update(context,
snapshot['id'], {'status': 'available',
'progress': '100%'})
self.db.cgsnapshot_update(context,
cgsnapshot_ref['id'],
{'status': 'available'})
LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot_ref['id'])
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "create.end")
return cgsnapshot_id
def delete_cgsnapshot(self, context, cgsnapshot_id):
"""Deletes cgsnapshot."""
caller_context = context
context = context.elevated()
cgsnapshot_ref = self.db.cgsnapshot_get(context, cgsnapshot_id)
project_id = cgsnapshot_ref['project_id']
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot_ref['id'])
snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
cgsnapshot_id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "delete.start")
try:
utils.require_driver_initialized(self.driver)
LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
{'cgsnap_id': cgsnapshot_id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot_ref['context'] = caller_context
for snapshot in snapshots:
snapshot['context'] = caller_context
model_update, snapshots = \
self.driver.delete_cgsnapshot(context, cgsnapshot_ref)
if snapshots:
for snapshot in snapshots:
update = {'status': snapshot['status']}
self.db.snapshot_update(context, snapshot['id'],
update)
if snapshot['status'] in ['error_deleting', 'error'] and \
model_update['status'] not in \
['error_deleting', 'error']:
model_update['status'] = snapshot['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting cgsnapshot '
'%s.') % cgsnapshot_ref['id'])
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
self.db.cgsnapshot_update(context, cgsnapshot_ref['id'],
model_update)
except Exception:
with excutils.save_and_reraise_exception():
self.db.cgsnapshot_update(context,
cgsnapshot_ref['id'],
{'status': 'error_deleting'})
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.db.cgsnapshot_destroy(context, cgsnapshot_id)
LOG.info(_LI("cgsnapshot %s: deleted successfully"),
cgsnapshot_ref['id'])
self._notify_about_cgsnapshot_usage(
context, cgsnapshot_ref, "delete.end", snapshots)
return True
def update_migrated_volume(self, ctxt, volume, new_volume):
"""Finalize migration process on backend device."""
model_update = None
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume)
if model_update:
self.db.volume_update(ctxt.elevated(),
volume['id'],
model_update)<|fim▁end|>
|
glance.get_remote_image_service(context, image_meta['id'])
self.driver.copy_volume_to_image(context, volume, image_service,
|
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2014 by SiegeLord
//
// All rights reserved. Distributed under ZLib. For full terms see the file LICENSE.
use std::env::var;
fn main()
{
if var("CARGO_FEATURE_LINK_NONE").is_ok()
{
return;<|fim▁hole|>
let debug = match var("CARGO_FEATURE_LINK_DEBUG")
{
Err(_) => "",
Ok(_) => "-debug"
};
let static_ = match var("CARGO_FEATURE_LINK_STATIC")
{
Err(_) => "",
Ok(_) => "-static"
};
println!("cargo:rustc-flags=-l allegro_font{}{}", static_, debug);
}<|fim▁end|>
|
}
|
<|file_name|>Content.tsx<|end_file_name|><|fim▁begin|>import styled from "./Theme";
export const Content = styled.div`<|fim▁hole|>`;<|fim▁end|>
|
margin: 2rem 0;
padding: 5px;
|
<|file_name|>union_fields_1_0.rs<|end_file_name|><|fim▁begin|>#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
#[repr(C)]
pub struct __BindgenUnionField<T>(::std::marker::PhantomData<T>);
impl<T> __BindgenUnionField<T> {
#[inline]
pub fn new() -> Self {
__BindgenUnionField(::std::marker::PhantomData)
}
#[inline]
pub unsafe fn as_ref(&self) -> &T {
::std::mem::transmute(self)
}
#[inline]
pub unsafe fn as_mut(&mut self) -> &mut T {
::std::mem::transmute(self)
}
}
impl<T> ::std::default::Default for __BindgenUnionField<T> {
#[inline]
fn default() -> Self {
Self::new()
}
}<|fim▁hole|> fn clone(&self) -> Self {
Self::new()
}
}
impl<T> ::std::marker::Copy for __BindgenUnionField<T> {}
impl<T> ::std::fmt::Debug for __BindgenUnionField<T> {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
fmt.write_str("__BindgenUnionField")
}
}
impl<T> ::std::hash::Hash for __BindgenUnionField<T> {
fn hash<H: ::std::hash::Hasher>(&self, _state: &mut H) {}
}
impl<T> ::std::cmp::PartialEq for __BindgenUnionField<T> {
fn eq(&self, _other: &__BindgenUnionField<T>) -> bool {
true
}
}
impl<T> ::std::cmp::Eq for __BindgenUnionField<T> {}
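// Illustrative usage sketch (assumption, not part of the generated code):
// with this pre-1.19 emulated-union layout, every field of a union such as
// `nsStyleUnion` below aliases the same `bindgen_union_field` storage, so a
// field is read through its accessor inside an `unsafe` block:
//
//     let style = nsStyleUnion::default();          // hypothetical value
//     let as_int = unsafe { *style.mInt.as_ref() };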
#[repr(C)]
#[derive(Debug, Default, Copy, Hash, PartialEq)]
pub struct nsStyleUnion {
pub mInt: __BindgenUnionField<::std::os::raw::c_int>,
pub mFloat: __BindgenUnionField<f32>,
pub mPointer: __BindgenUnionField<*mut ::std::os::raw::c_void>,
pub bindgen_union_field: u64,
}
#[test]
fn bindgen_test_layout_nsStyleUnion() {
assert_eq!(
::std::mem::size_of::<nsStyleUnion>(),
8usize,
concat!("Size of: ", stringify!(nsStyleUnion))
);
assert_eq!(
::std::mem::align_of::<nsStyleUnion>(),
8usize,
concat!("Alignment of ", stringify!(nsStyleUnion))
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<nsStyleUnion>())).mInt as *const _ as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(nsStyleUnion),
"::",
stringify!(mInt)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<nsStyleUnion>())).mFloat as *const _ as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(nsStyleUnion),
"::",
stringify!(mFloat)
)
);
assert_eq!(
unsafe {
&(*(::std::ptr::null::<nsStyleUnion>())).mPointer as *const _
as usize
},
0usize,
concat!(
"Offset of field: ",
stringify!(nsStyleUnion),
"::",
stringify!(mPointer)
)
);
}
impl Clone for nsStyleUnion {
fn clone(&self) -> Self {
*self
}
}<|fim▁end|>
|
impl<T> ::std::clone::Clone for __BindgenUnionField<T> {
#[inline]
|
<|file_name|>drizzled.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2010,2011 Patrick Crews
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or<|fim▁hole|># MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
""" drizzled.py: code to allow a serverManager
to provision and start up a drizzled server object
for test execution
"""
# imports
import os
from lib.server_mgmt.server import Server
class drizzleServer(Server):
""" represents a drizzle server, its possessions
(datadir, ports, etc), and methods for controlling
and querying it
TODO: create a base server class that contains
standard methods from which we can inherit
Currently there are definitely methods / attr
which are general
"""
def __init__( self, name, server_manager, code_tree, default_storage_engine
, server_options, requester, test_executor, workdir_root):
super(drizzleServer, self).__init__( name
, server_manager
, code_tree
, default_storage_engine
, server_options
, requester
, test_executor
, workdir_root)
self.preferred_base_port = 9306
# client files
self.drizzledump = self.code_tree.drizzledump
self.drizzle_client = self.code_tree.drizzle_client
self.drizzleimport = self.code_tree.drizzleimport
self.drizzleslap = self.code_tree.drizzleslap
self.server_path = self.code_tree.drizzle_server
self.drizzle_client_path = self.code_tree.drizzle_client
self.schemawriter = self.code_tree.schemawriter
self.trx_reader = self.code_tree.trx_reader
# Get our ports
self.port_block = self.system_manager.port_manager.get_port_block( self.name
, self.preferred_base_port
, 6 )
self.master_port = self.port_block[0]
self.drizzle_tcp_port = self.port_block[1]
self.mc_port = self.port_block[2]
self.pbms_port = self.port_block[3]
self.rabbitmq_node_port = self.port_block[4]
self.json_server_port = self.port_block[5]
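        # For example (illustrative): if the preferred base port 9306 is free,
        # the block is 9306..9311 and the assignments above become
        # master=9306, drizzle_tcp=9307, mc=9308, pbms=9309,
        # rabbitmq_node=9310, json_server=9311. Real values depend on what
        # the port manager finds available.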
# Generate our working directories
self.dirset = {'var_%s' %(self.name): {'std_data_ln':( os.path.join(self.code_tree.testdir,'std_data'))
,'log':None
,'run':None
,'tmp':None
,'master-data': {'local': { 'test':None
, 'mysql':None
}
}
}
}
self.workdir = self.system_manager.create_dirset( workdir_root
, self.dirset)
self.vardir = self.workdir
self.tmpdir = os.path.join(self.vardir,'tmp')
self.rundir = os.path.join(self.vardir,'run')
self.logdir = os.path.join(self.vardir,'log')
self.datadir = os.path.join(self.vardir,'master-data')
self.error_log = os.path.join(self.logdir,'error.log')
self.pid_file = os.path.join(self.rundir,('%s.pid' %(self.name)))
self.socket_file = os.path.join(self.vardir, ('%s.sock' %(self.name)))
if len(self.socket_file) > 107:
# MySQL has a limitation of 107 characters for socket file path
# we copy the mtr workaround of creating one in /tmp
self.logging.verbose("Default socket file path: %s" %(self.socket_file))
self.socket_file = "/tmp/%s_%s.%s.sock" %(self.system_manager.uuid
,self.owner
,self.name)
self.logging.verbose("Changing to alternate: %s" %(self.socket_file))
self.timer_file = os.path.join(self.logdir,('timer'))
# Do magic to create a config file for use with the slave
# plugin
self.slave_config_file = os.path.join(self.logdir,'slave.cnf')
self.create_slave_config_file()
self.snapshot_path = os.path.join(self.tmpdir,('snapshot_%s' %(self.master_port)))
# We want to use --secure-file-priv = $vardir by default
# but there are times / tools when we need to shut this off
if self.no_secure_file_priv:
self.secure_file_string = ''
else:
self.secure_file_string = "--secure-file-priv='%s'" %(self.vardir)
self.user_string = '--user=root'
self.initialize_databases()
self.take_db_snapshot()
self.logging.debug_class(self)
def report(self):
""" We print out some general useful info """
report_values = [ 'name'
, 'master_port'
, 'drizzle_tcp_port'
, 'mc_port'
, 'pbms_port'
, 'rabbitmq_node_port'
, 'vardir'
, 'status'
]
self.logging.info("%s server:" %(self.owner))
for key in report_values:
value = vars(self)[key]
self.logging.info("%s: %s" %(key.upper(), value))
def get_start_cmd(self):
""" Return the command string that will start up the server
as desired / intended
"""
server_args = [ self.process_server_options()
, "--mysql-protocol.port=%d" %(self.master_port)
, "--mysql-protocol.connect-timeout=60"
, "--innodb.data-file-path=ibdata1:20M:autoextend"
, "--sort-buffer-size=256K"
, "--max-heap-table-size=1M"
, "--mysql-unix-socket-protocol.path=%s" %(self.socket_file)
, "--pid-file=%s" %(self.pid_file)
, "--drizzle-protocol.port=%d" %(self.drizzle_tcp_port)
, "--default-storage-engine=%s" %(self.default_storage_engine)
, "--datadir=%s" %(self.datadir)
, "--tmpdir=%s" %(self.tmpdir)
, self.secure_file_string
, self.user_string
]
if self.gdb:
server_args.append('--gdb')
return self.system_manager.handle_gdb_reqs(self, server_args)
else:
return "%s %s %s & " % ( self.cmd_prefix
, self.server_path
, " ".join(server_args)
)
def get_stop_cmd(self):
""" Return the command that will shut us down """
return "%s --user=root --port=%d --connect-timeout=5 --silent --password= --shutdown " %(self.drizzle_client_path, self.master_port)
def get_ping_cmd(self):
"""Return the command string that will
ping / check if the server is alive
"""
return "%s --ping --port=%d --user=root" % (self.drizzle_client_path, self.master_port)
def is_started(self):
""" Determine if the server is up and running -
this may vary from server type to server type
"""
# We experiment with waiting for a pid file to be created vs. pinging
# This is what test-run.pl does and it helps us pass logging_stats tests
# while not self.ping_server(server, quiet=True) and timer != timeout:
return self.system_manager.find_path( [self.pid_file]
, required=0)
def create_slave_config_file(self):
""" Create a config file suitable for use
with the slave-plugin. This allows
us to tie other servers in easily
"""
config_data = [ "[master1]"
, "master-host=127.0.0.1"
, "master-port=%d" %self.master_port
, "master-user=root"
, "master-pass=''"
, "max-reconnects=100"
#, "seconds-between-reconnects=20"
]
outfile = open(self.slave_config_file,'w')
for line in config_data:
outfile.write("%s\n" %(line))
outfile.close()<|fim▁end|>
|
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
<|file_name|>api_samples_test_base.py<|end_file_name|><|fim▁begin|># Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from oslo_serialization import jsonutils
import six
from nova import test
from nova.tests.functional import integrated_helpers
class NoMatch(test.TestingException):
pass
class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
ctype = 'json'
all_extensions = False
extension_name = None
sample_dir = None
request_api_version = None
_use_common_server_api_samples = False
def _pretty_data(self, data):
data = jsonutils.dumps(jsonutils.loads(data), sort_keys=True,
indent=4)
return '\n'.join(line.rstrip() for line in data.split('\n')).strip()
def _objectify(self, data):
if not data:
return {}
# NOTE(vish): allow non-quoted replacements to survive json<|fim▁hole|>
@classmethod
def _get_sample_path(cls, name, dirname, suffix='', api_version=None):
parts = [dirname]
parts.append('api_samples')
if cls.all_extensions:
parts.append('all_extensions')
# Note(gmann): if _use_common_server_api_samples is set to True
# then common server sample files present in 'servers' directory
# will be used. As of now it is being used for server POST request
# to avoid duplicate copy of server req and resp sample files.
# Example - ServersSampleBase's _post_server method.
elif cls._use_common_server_api_samples:
parts.append('servers')
else:
if cls.sample_dir:
parts.append(cls.sample_dir)
elif cls.extension_name:
parts.append(cls.extension_name)
if api_version:
parts.append('v' + api_version)
parts.append(name + "." + cls.ctype + suffix)
return os.path.join(*parts)
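        # For example (illustrative only), a lookup could resolve to
        #     <dirname>/api_samples/servers/server-post-req.json.tpl
        # where "server-post-req" is a hypothetical sample name.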
@classmethod
def _get_sample(cls, name, api_version=None):
dirname = os.path.dirname(os.path.abspath(__file__))
dirname = os.path.normpath(os.path.join(dirname,
"../../../doc"))
return cls._get_sample_path(name, dirname, api_version=api_version)
@classmethod
def _get_template(cls, name, api_version=None):
dirname = os.path.dirname(os.path.abspath(__file__))
dirname = os.path.normpath(os.path.join(dirname,
"./api_sample_tests"))
return cls._get_sample_path(name, dirname, suffix='.tpl',
api_version=api_version)
def _read_template(self, name):
template = self._get_template(name, self.request_api_version)
with open(template) as inf:
return inf.read().strip()
def _write_template(self, name, data):
with open(self._get_template(name,
self.request_api_version), 'w') as outf:
outf.write(data)
def _write_sample(self, name, data):
with open(self._get_sample(
name, self.request_api_version), 'w') as outf:
outf.write(data)
def _compare_result(self, subs, expected, result, result_str):
matched_value = None
if isinstance(expected, dict):
if not isinstance(result, dict):
raise NoMatch('%(result_str)s: %(result)s is not a dict.'
% {'result_str': result_str, 'result': result})
ex_keys = sorted(expected.keys())
res_keys = sorted(result.keys())
if ex_keys != res_keys:
ex_delta = []
res_delta = []
for key in ex_keys:
if key not in res_keys:
ex_delta.append(key)
for key in res_keys:
if key not in ex_keys:
res_delta.append(key)
raise NoMatch(
'Dictionary key mismatch:\n'
'Extra key(s) in template:\n%(ex_delta)s\n'
'Extra key(s) in %(result_str)s:\n%(res_delta)s\n' %
{'ex_delta': ex_delta, 'result_str': result_str,
'res_delta': res_delta})
for key in ex_keys:
res = self._compare_result(subs, expected[key], result[key],
result_str)
matched_value = res or matched_value
elif isinstance(expected, list):
if not isinstance(result, list):
raise NoMatch(
'%(result_str)s: %(result)s is not a list.' %
{'result_str': result_str, 'result': result})
expected = expected[:]
extra = []
for res_obj in result:
for i, ex_obj in enumerate(expected):
try:
matched_value = self._compare_result(subs, ex_obj,
res_obj,
result_str)
del expected[i]
break
except NoMatch:
pass
else:
extra.append(res_obj)
error = []
if expected:
error.append('Extra list items in template:')
error.extend([repr(o) for o in expected])
if extra:
error.append('Extra list items in %(result_str)s:' %
{'result_str': result_str})
error.extend([repr(o) for o in extra])
if error:
raise NoMatch('\n'.join(error))
elif isinstance(expected, six.string_types) and '%' in expected:
# NOTE(vish): escape stuff for regex
for char in '[]<>?':
expected = expected.replace(char, '\\%s' % char)
# NOTE(vish): special handling of subs that are not quoted. We are
# expecting an int but we had to pass in a string
# so the json would parse properly.
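            # Worked example (illustrative, names are hypothetical): a
            # template value of "%(int:flavor_ram)s" came from _objectify()
            # rewriting a bare %(flavor_ram)s so the template stayed valid
            # JSON; below the "int:" prefix is dropped and the actual result
            # is stringified before the regex comparison.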
if expected.startswith("%(int:"):
result = str(result)
expected = expected.replace('int:', '')
expected = expected % subs
expected = '^%s$' % expected
match = re.match(expected, result)
if not match:
raise NoMatch(
'Values do not match:\n'
'Template: %(expected)s\n%(result_str)s: %(result)s' %
{'expected': expected, 'result_str': result_str,
'result': result})
try:
matched_value = match.group('id')
except IndexError:
if match.groups():
matched_value = match.groups()[0]
else:
if isinstance(expected, six.string_types):
# NOTE(danms): Ignore whitespace in this comparison
expected = expected.strip()
if isinstance(result, six.string_types):
result = result.strip()
if expected != result:
# NOTE(tdurakov):this attempt to parse string as JSON
# is needed for correct comparison of hypervisor.cpu_info,
# which is stringified JSON object
#
# TODO(tdurakov): remove this check as soon as
# hypervisor.cpu_info become common JSON object in REST API.
try:
expected = self._objectify(expected)
result = self._objectify(result)
return self._compare_result(subs, expected, result,
result_str)
except ValueError:
pass
raise NoMatch(
'Values do not match:\n'
'Template: %(expected)s\n%(result_str)s: '
'%(result)s' % {'expected': expected,
'result_str': result_str,
'result': result})
return matched_value
def generalize_subs(self, subs, vanilla_regexes):
"""Give the test a chance to modify subs after the server response
was verified, and before the on-disk doc/api_samples file is checked.
This may be needed by some tests to convert exact matches expected
from the server into pattern matches to verify what is in the
sample file.
If there are no changes to be made, subs is returned unharmed.
"""
return subs
def _verify_response(self, name, subs, response, exp_code):
self.assertEqual(response.status_code, exp_code)
response_data = response.content
response_data = self._pretty_data(response_data)
if not os.path.exists(self._get_template(name,
self.request_api_version)):
self._write_template(name, response_data)
template_data = response_data
else:
template_data = self._read_template(name)
if (self.generate_samples and
not os.path.exists(self._get_sample(
name, self.request_api_version))):
self._write_sample(name, response_data)
sample_data = response_data
else:
with file(self._get_sample(name,
self.request_api_version)) as sample:
sample_data = sample.read()
try:
template_data = self._objectify(template_data)
response_data = self._objectify(response_data)
response_result = self._compare_result(subs, template_data,
response_data, "Response")
# NOTE(danms): replace some of the subs with patterns for the
# doc/api_samples check, which won't have things like the
# correct compute host name. Also let the test do some of its
# own generalization, if necessary
vanilla_regexes = self._get_regexes()
subs['compute_host'] = vanilla_regexes['host_name']
subs['id'] = vanilla_regexes['id']
subs = self.generalize_subs(subs, vanilla_regexes)
sample_data = self._objectify(sample_data)
self._compare_result(subs, template_data, sample_data, "Sample")
return response_result
except NoMatch:
raise
def _get_host(self):
return 'http://openstack.example.com'
def _get_glance_host(self):
return 'http://glance.openstack.example.com'
def _get_regexes(self):
if self.ctype == 'json':
text = r'(\\"|[^"])*'
else:
text = r'[^<]*'
isotime_re = '\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}Z'
strtime_re = '\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}\.\d{6}'
xmltime_re = ('\d{4}-[0,1]\d-[0-3]\d '
'\d{2}:\d{2}:\d{2}'
'(\.\d{6})?(\+00:00)?')
# NOTE(claudiub): the x509 keypairs are different from the
# ssh keypairs. For example, the x509 fingerprint has 40 bytes.
return {
'isotime': isotime_re,
'strtime': strtime_re,
'strtime_or_none': r'None|%s' % strtime_re,
'xmltime': xmltime_re,
'password': '[0-9a-zA-Z]{1,12}',
            'ip': '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}',
'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]{1,4}',
'id': '(?P<id>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12})',
'uuid': '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12}',
'reservation_id': 'r-[0-9a-zA-Z]{8}',
'private_key': '(-----BEGIN RSA PRIVATE KEY-----|)'
'[a-zA-Z0-9\n/+=]*'
'(-----END RSA PRIVATE KEY-----|)',
'public_key': '(ssh-rsa|-----BEGIN CERTIFICATE-----)'
'[ a-zA-Z0-9\n/+=]*'
'(Generated-by-Nova|-----END CERTIFICATE-----)',
'fingerprint': '(([0-9a-f]{2}:){19}|([0-9a-f]{2}:){15})'
'[0-9a-f]{2}',
'keypair_type': 'ssh|x509',
'host': self._get_host(),
'host_name': '[0-9a-z]{32}',
'glance_host': self._get_glance_host(),
'compute_host': self.compute.host,
'text': text,
'int': '[0-9]+',
'user_id': text,
}
def _get_response(self, url, method, body=None, strip_version=False,
api_version=None):
headers = {}
headers['Content-Type'] = 'application/' + self.ctype
headers['Accept'] = 'application/' + self.ctype
if api_version:
headers['X-OpenStack-Nova-API-Version'] = api_version
return self.api.api_request(url, body=body, method=method,
headers=headers, strip_version=strip_version)
def _do_get(self, url, strip_version=False, api_version=None):
return self._get_response(url, 'GET', strip_version=strip_version,
api_version=(api_version or
self.request_api_version))
def _do_post(self, url, name, subs, method='POST', api_version=None):
body = self._read_template(name) % subs
sample = self._get_sample(name, self.request_api_version)
if self.generate_samples and not os.path.exists(sample):
self._write_sample(name, body)
return self._get_response(url, method, body,
api_version=(api_version or
self.request_api_version))
def _do_put(self, url, name, subs, api_version=None):
return self._do_post(url, name, subs, method='PUT',
api_version=(api_version or
self.request_api_version))
def _do_delete(self, url, api_version=None):
return self._get_response(url, 'DELETE',
api_version=(api_version or
self.request_api_version))<|fim▁end|>
|
data = re.sub(r'([^"])%\((.+)\)s([^"])', r'\1"%(int:\2)s"\3', data)
return jsonutils.loads(data)
|
<|file_name|>register.tsx<|end_file_name|><|fim▁begin|>import React from 'react';
import { addons, types } from '@storybook/addons';
import { ADDON_ID, PANEL_ID, PARAM_KEY } from './constants';
import { VisionSimulator } from './components/VisionSimulator';
import { A11YPanel } from './components/A11YPanel';
import { A11yContextProvider } from './components/A11yContext';
addons.register(ADDON_ID, () => {
addons.add(PANEL_ID, {
title: '',
type: types.TOOL,
match: ({ viewMode }) => viewMode === 'story',
render: () => <VisionSimulator />,
});
<|fim▁hole|> render: ({ active = true, key }) => (
<A11yContextProvider key={key} active={active}>
<A11YPanel />
</A11yContextProvider>
),
paramKey: PARAM_KEY,
});
});<|fim▁end|>
|
addons.add(PANEL_ID, {
title: 'Accessibility',
type: types.PANEL,
|
<|file_name|>LengthValidator.java<|end_file_name|><|fim▁begin|>package cn.javay.zheng.common.validator;
import com.baidu.unbiz.fluentvalidator.ValidationError;
import com.baidu.unbiz.fluentvalidator.Validator;
import com.baidu.unbiz.fluentvalidator.ValidatorContext;
import com.baidu.unbiz.fluentvalidator.ValidatorHandler;
/**
 * Length validation.
* Created by shuzheng on 2017/2/18.
*/
public class LengthValidator extends ValidatorHandler<String> implements Validator<String> {
private int min;
private int max;
private String fieldName;
public LengthValidator(int min, int max, String fieldName) {
this.min = min;
this.max = max;
this.fieldName = fieldName;
}
@Override
public boolean validate(ValidatorContext context, String s) {
if (null == s || s.length() > max || s.length() < min) {
            context.addError(ValidationError.create(String.format("%s must be between %s and %s characters long!", fieldName, min, max))
.setErrorCode(-1)
.setField(fieldName)
.setInvalidValue(s));<|fim▁hole|> return false;
}
return true;
}
}<|fim▁end|>
| |
<|file_name|>TestConflictingDefinition.py<|end_file_name|><|fim▁begin|>"""Test that types defined in shared libraries work correctly."""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestRealDefinition(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
def test_frame_var_after_stop_at_implementation(self):
"""Test that we can find the implementation for an objective C type"""
if self.getArchitecture() == 'i386':
self.skipTest("requires modern objc runtime")
self.build()
self.shlib_names = ["libTestExt.dylib", "libTest.dylib"]
self.common_setup()
line = line_number('TestExt/TestExt.m', '// break here')
lldbutil.run_break_set_by_file_and_line(
self, 'TestExt.m', line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
# This should display correctly.
self.expect(
"expr 42",<|fim▁hole|> def common_setup(self):
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.registerSharedLibrariesWithTarget(target, self.shlib_names)
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)<|fim▁end|>
|
"A simple expression should execute correctly",
substrs=[
"42"])
|
<|file_name|>socket.go<|end_file_name|><|fim▁begin|>// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package introspection
import (
"fmt"
"net"
"net/http"
"runtime"
"github.com/juju/errors"
"github.com/juju/loggo"
"gopkg.in/tomb.v1"
"gopkg.in/yaml.v2"
"github.com/juju/juju/worker"
"github.com/juju/juju/worker/introspection/pprof"
)
var logger = loggo.GetLogger("juju.worker.introspection")
// DepEngineReporter provides insight into the running dependency engine of the agent.
type DepEngineReporter interface {
// Report returns a map describing the state of the receiver. It is expected
// to be goroutine-safe.
Report() map[string]interface{}
}
// Config describes the arguments required to create the introspection worker.
type Config struct {
SocketName string
Reporter DepEngineReporter
}
// Validate checks the config values to assert they are valid to create the worker.
func (c *Config) Validate() error {
if c.SocketName == "" {
return errors.NotValidf("empty SocketName")
}
return nil
}
// socketListener is a worker and constructed with NewWorker.
type socketListener struct {
tomb tomb.Tomb
listener *net.UnixListener
reporter DepEngineReporter
done chan struct{}
}
// NewWorker starts an http server listening on an abstract domain socket
// which will be created with the specified name.
func NewWorker(config Config) (worker.Worker, error) {
if err := config.Validate(); err != nil {
return nil, errors.Trace(err)
}
if runtime.GOOS != "linux" {
return nil, errors.NotSupportedf("os %q", runtime.GOOS)
}
path := "@" + config.SocketName
addr, err := net.ResolveUnixAddr("unix", path)
if err != nil {
return nil, errors.Annotate(err, "unable to resolve unix socket")
}
l, err := net.ListenUnix("unix", addr)
if err != nil {
return nil, errors.Annotate(err, "unable to listen on unix socket")
}
logger.Debugf("introspection worker listening on %q", path)
w := &socketListener{
listener: l,
reporter: config.Reporter,
done: make(chan struct{}),
}
go w.serve()
go w.run()
return w, nil
}
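// Illustrative client sketch (assumption, not part of the original file):
// because the socket is in the abstract namespace (the leading "@"), no file
// exists on disk and a client dials the same name directly:
//
//	conn, err := net.Dial("unix", "@jujud-machine-0") // hypothetical name
//	if err == nil {
//		fmt.Fprint(conn, "GET /depengine/ HTTP/1.0\r\n\r\n")
//	}
//
// The actual name comes from Config.SocketName.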
func (w *socketListener) serve() {
mux := http.NewServeMux()
mux.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
mux.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
mux.Handle("/depengine/", http.HandlerFunc(w.depengineReport))
srv := http.Server{
Handler: mux,
}
logger.Debugf("stats worker now servering")
defer logger.Debugf("stats worker servering finished")
defer close(w.done)
srv.Serve(w.listener)
}
func (w *socketListener) run() {
defer w.tomb.Done()
defer logger.Debugf("stats worker finished")
<-w.tomb.Dying()
logger.Debugf("stats worker closing listener")
w.listener.Close()
// Don't mark the worker as done until the serve goroutine has finished.
<-w.done
}
// Kill implements worker.Worker.
func (w *socketListener) Kill() {
w.tomb.Kill(nil)
}
// Wait implements worker.Worker.
func (w *socketListener) Wait() error {
return w.tomb.Wait()
}
func (s *socketListener) depengineReport(w http.ResponseWriter, r *http.Request) {
if s.reporter == nil {
w.WriteHeader(http.StatusNotFound)
fmt.Fprintln(w, "missing reporter")
return<|fim▁hole|> fmt.Fprintf(w, "error: %v\n", err)
return
}
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
fmt.Fprint(w, "Dependency Engine Report\n\n")
w.Write(bytes)
}<|fim▁end|>
|
}
bytes, err := yaml.Marshal(s.reporter.Report())
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
|
<|file_name|>webhook-notification-spec.js<|end_file_name|><|fim▁begin|>import WebhookNotification, {
LinkClick,
LinkClickCount,
MessageTrackingData,
WebhookDelta,
WebhookObjectAttributes,
WebhookObjectData,
} from '../src/models/webhook-notification';
import { WebhookTriggers } from '../src/models/webhook';
describe('Webhook Notification', () => {
test('Should deserialize from JSON properly', done => {
const webhookNotificationJSON = {
deltas: [
{
date: 1602623196,
object: 'message',
type: 'message.created',
object_data: {
namespace_id: 'aaz875kwuvxik6ku7pwkqp3ah',
account_id: 'aaz875kwuvxik6ku7pwkqp3ah',
object: 'message',
attributes: {
action: 'save_draft',
job_status_id: 'abc1234',
thread_id: '2u152dt4tnq9j61j8seg26ni6',
received_date: 1602623166,
},
id: '93mgpjynqqu5fohl2dvv6ray7',
metadata: {
sender_app_id: 64280,
link_data: [
{
url: 'https://nylas.com/',
count: 1,
},
],<|fim▁hole|> link_index: 0,
id: 0,
user_agent:
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36',
timestamp: 1602623980,
},
],
message_id: '4utnziee7bu2ohak56wfxe39p',
payload: 'Tracking enabled',
},
},
},
],
};
const webhookNotification = new WebhookNotification().fromJSON(
webhookNotificationJSON
);
expect(webhookNotification.deltas.length).toBe(1);
const webhookDelta = webhookNotification.deltas[0];
expect(webhookDelta instanceof WebhookDelta).toBe(true);
expect(webhookDelta.date).toEqual(new Date(1602623196 * 1000));
expect(webhookDelta.object).toEqual('message');
expect(webhookDelta.type).toEqual(WebhookTriggers.MessageCreated);
const webhookDeltaObjectData = webhookDelta.objectData;
expect(webhookDeltaObjectData instanceof WebhookObjectData).toBe(true);
expect(webhookDeltaObjectData.id).toEqual('93mgpjynqqu5fohl2dvv6ray7');
expect(webhookDeltaObjectData.accountId).toEqual(
'aaz875kwuvxik6ku7pwkqp3ah'
);
expect(webhookDeltaObjectData.namespaceId).toEqual(
'aaz875kwuvxik6ku7pwkqp3ah'
);
expect(webhookDeltaObjectData.object).toEqual('message');
const webhookDeltaObjectAttributes =
webhookDeltaObjectData.objectAttributes;
expect(
webhookDeltaObjectAttributes instanceof WebhookObjectAttributes
).toBe(true);
expect(webhookDeltaObjectAttributes.action).toEqual('save_draft');
expect(webhookDeltaObjectAttributes.jobStatusId).toEqual('abc1234');
expect(webhookDeltaObjectAttributes.threadId).toEqual(
'2u152dt4tnq9j61j8seg26ni6'
);
expect(webhookDeltaObjectAttributes.receivedDate).toEqual(
new Date(1602623166 * 1000)
);
const webhookMessageTrackingData = webhookDeltaObjectData.metadata;
expect(webhookMessageTrackingData instanceof MessageTrackingData).toBe(
true
);
expect(webhookMessageTrackingData.messageId).toEqual(
'4utnziee7bu2ohak56wfxe39p'
);
expect(webhookMessageTrackingData.payload).toEqual('Tracking enabled');
expect(webhookMessageTrackingData.timestamp).toEqual(
new Date(1602623966 * 1000)
);
expect(webhookMessageTrackingData.senderAppId).toBe(64280);
expect(webhookMessageTrackingData.linkData.length).toBe(1);
expect(webhookMessageTrackingData.recents.length).toBe(1);
const linkData = webhookMessageTrackingData.linkData[0];
expect(linkData instanceof LinkClickCount).toBe(true);
expect(linkData.url).toEqual('https://nylas.com/');
expect(linkData.count).toBe(1);
const recents = webhookMessageTrackingData.recents[0];
expect(recents instanceof LinkClick).toBe(true);
expect(recents.id).toBe(0);
expect(recents.ip).toEqual('24.243.155.85');
expect(recents.userAgent).toEqual(
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'
);
expect(recents.timestamp).toEqual(new Date(1602623980 * 1000));
expect(recents.linkIndex).toBe(0);
done();
});
});<|fim▁end|>
|
timestamp: 1602623966,
recents: [
{
ip: '24.243.155.85',
|
<|file_name|>subject.cpp<|end_file_name|><|fim▁begin|><|fim▁hole|>/// -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: t -*-
///
/// \file subject.cpp
/// \author Martin Reddy
/// \brief The subject of an observer relationship.
///
/// Copyright (c) 2010, Martin Reddy. All rights reserved.
/// Distributed under the X11/MIT License. See LICENSE.txt.
/// See http://APIBook.com/ for the latest version.
///
#include "subject.h"<|fim▁end|>
| |
<|file_name|>ui-grid.language.sv.js<|end_file_name|><|fim▁begin|>/*!
* ui-grid - v4.8.2 - 2019-10-07
* Copyright (c) 2019 ; License: MIT
*/
(function () {
angular.module('ui.grid').config(['$provide', function($provide) {
$provide.decorator('i18nService', ['$delegate', function($delegate) {
$delegate.add('sv', {
headerCell: {
aria: {
defaultFilterLabel: 'Kolumnfilter',
removeFilter: 'Ta bort filter',
columnMenuButtonLabel: 'Kolumnmeny',
column: 'Kolumn'
},
priority: 'Prioritet:',
filterLabel: "Filter för kolumn: "
},
aggregate: {
label: 'Poster'
},
groupPanel: {
description: 'Dra en kolumnrubrik hit och släpp den för att gruppera efter den kolumnen.'
},
search: {
aria: {
selected: 'Rad är vald',
notSelected: 'Rad är inte vald'
},
placeholder: 'Sök...',
showingItems: 'Visar:',
selectedItems: 'Valda:',
totalItems: 'Antal:',
size: 'Sidstorlek:',
first: 'Första sidan',
next: 'Nästa sida',
previous: 'Föregående sida',
last: 'Sista sidan'
},
menu: {
text: 'Välj kolumner:'
},
sort: {
ascending: 'Sortera stigande',
descending: 'Sortera fallande',
none: 'Ingen sortering',
remove: 'Inaktivera sortering'
},
column: {
hide: 'Göm kolumn'
},
aggregation: {
count: 'Antal rader: ',
sum: 'Summa: ',
avg: 'Genomsnitt: ',
min: 'Min: ',
max: 'Max: '
},
pinning: {
pinLeft: 'Fäst vänster',
pinRight: 'Fäst höger',
unpin: 'Lösgör'
},
columnMenu: {
close: 'Stäng'
},
gridMenu: {
aria: {
buttonLabel: 'Meny'
},
columns: 'Kolumner:',
importerTitle: 'Importera fil',
exporterAllAsCsv: 'Exportera all data som CSV',
exporterVisibleAsCsv: 'Exportera synlig data som CSV',
exporterSelectedAsCsv: 'Exportera markerad data som CSV',
exporterAllAsPdf: 'Exportera all data som PDF',
exporterVisibleAsPdf: 'Exportera synlig data som PDF',
exporterSelectedAsPdf: 'Exportera markerad data som PDF',
exporterAllAsExcel: 'Exportera all data till Excel',
exporterVisibleAsExcel: 'Exportera synlig data till Excel',
exporterSelectedAsExcel: 'Exportera markerad data till Excel',
clearAllFilters: 'Nollställ alla filter'
},
importer: {
noHeaders: 'Kolumnnamn kunde inte härledas. Har filen ett sidhuvud?',
noObjects: 'Objekt kunde inte härledas. Har filen data undantaget sidhuvud?',
invalidCsv: 'Filen kunde inte behandlas, är den en giltig CSV?',
invalidJson: 'Filen kunde inte behandlas, är den en giltig JSON?',
jsonNotArray: 'Importerad JSON-fil måste innehålla ett fält. Import avbruten.'
},
pagination: {
aria: {
pageToFirst: 'Gå till första sidan',
pageBack: 'Gå en sida bakåt',
pageSelected: 'Vald sida',
pageForward: 'Gå en sida framåt',
pageToLast: 'Gå till sista sidan'
},
sizes: 'Poster per sida',
totalItems: 'Poster',
through: 'genom',
of: 'av'
},
grouping: {
group: 'Gruppera',
ungroup: 'Dela upp',
aggregate_count: 'Agg: Antal',
aggregate_sum: 'Agg: Summa',
aggregate_max: 'Agg: Max',
aggregate_min: 'Agg: Min',
aggregate_avg: 'Agg: Genomsnitt',
aggregate_remove: 'Agg: Ta bort'
},
validate: {
error: 'Error:',
minLength: 'Värdet borde vara minst THRESHOLD tecken långt.',
maxLength: 'Värdet borde vara max THRESHOLD tecken långt.',
required: 'Ett värde krävs.'
}
});
return $delegate;<|fim▁hole|> }]);
})();<|fim▁end|>
|
}]);
|
<|file_name|>CommentMgmtService.java<|end_file_name|><|fim▁begin|>/*
* Symphony - A modern community (forum/SNS/blog) platform written in Java.
* Copyright (C) 2012-2017, b3log.org & hacpai.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.b3log.symphony.service;
import java.util.List;
import java.util.Locale;
import javax.inject.Inject;
import org.apache.commons.lang.StringUtils;
import org.b3log.latke.Keys;
import org.b3log.latke.event.Event;
import org.b3log.latke.event.EventException;
import org.b3log.latke.event.EventManager;
import org.b3log.latke.logging.Level;
import org.b3log.latke.logging.Logger;
import org.b3log.latke.model.User;
import org.b3log.latke.repository.RepositoryException;
import org.b3log.latke.repository.Transaction;
import org.b3log.latke.repository.annotation.Transactional;
import org.b3log.latke.service.LangPropsService;
import org.b3log.latke.service.ServiceException;
import org.b3log.latke.service.annotation.Service;
import org.b3log.latke.util.Ids;
import org.b3log.symphony.event.EventTypes;
import org.b3log.symphony.model.Article;
import org.b3log.symphony.model.Comment;
import org.b3log.symphony.model.Common;
import org.b3log.symphony.model.Liveness;
import org.b3log.symphony.model.Notification;
import org.b3log.symphony.model.Option;
import org.b3log.symphony.model.Pointtransfer;
import org.b3log.symphony.model.Reward;
import org.b3log.symphony.model.Role;
import org.b3log.symphony.model.Tag;
import org.b3log.symphony.model.UserExt;
import org.b3log.symphony.repository.ArticleRepository;
import org.b3log.symphony.repository.CommentRepository;
import org.b3log.symphony.repository.NotificationRepository;
import org.b3log.symphony.repository.OptionRepository;
import org.b3log.symphony.repository.TagArticleRepository;
import org.b3log.symphony.repository.TagRepository;
import org.b3log.symphony.repository.UserRepository;
import org.b3log.symphony.util.Emotions;
import org.b3log.symphony.util.Symphonys;
import org.json.JSONObject;
/**
* Comment management service.
*
* @author <a href="http://88250.b3log.org">Liang Ding</a>
* @version 2.12.10.19, Feb 2, 2017
* @since 0.2.0
*/
@Service
public class CommentMgmtService {
/**
* Logger.
*/
private static final Logger LOGGER = Logger.getLogger(CommentMgmtService.class.getName());
/**
* Comment repository.
*/
@Inject
private CommentRepository commentRepository;
/**
* Article repository.
*/
@Inject
private ArticleRepository articleRepository;
/**
* Option repository.
*/
@Inject
private OptionRepository optionRepository;
/**
* Tag repository.
*/
@Inject
private TagRepository tagRepository;
/**
* Tag-Article repository.
*/
@Inject
private TagArticleRepository tagArticleRepository;
/**
* User repository.
*/
@Inject
private UserRepository userRepository;
/**
* Notification repository.
*/
@Inject
private NotificationRepository notificationRepository;
/**
* Event manager.
*/
@Inject
private EventManager eventManager;
/**
* Language service.
*/
@Inject
private LangPropsService langPropsService;
/**
* Pointtransfer management service.
*/
@Inject
private PointtransferMgmtService pointtransferMgmtService;
/**
* Reward management service.
*/
@Inject
private RewardMgmtService rewardMgmtService;
/**
* Reward query service.
*/
@Inject
private RewardQueryService rewardQueryService;
/**
* Notification management service.
*/
@Inject
private NotificationMgmtService notificationMgmtService;
/**
* Liveness management service.
*/
@Inject
private LivenessMgmtService livenessMgmtService;
/**
* Removes a comment specified with the given comment id.
*
* @param commentId the given comment id
*/
@Transactional
public void removeComment(final String commentId) {
try {
final JSONObject comment = commentRepository.get(commentId);
if (null == comment) {
return;
}
final String articleId = comment.optString(Comment.COMMENT_ON_ARTICLE_ID);
final JSONObject article = articleRepository.get(articleId);
article.put(Article.ARTICLE_COMMENT_CNT, article.optInt(Article.ARTICLE_COMMENT_CNT) - 1);
// Just clear latest time and commenter name, do not get the real latest comment to update
article.put(Article.ARTICLE_LATEST_CMT_TIME, 0);
article.put(Article.ARTICLE_LATEST_CMTER_NAME, "");
articleRepository.update(articleId, article);
final String commentAuthorId = comment.optString(Comment.COMMENT_AUTHOR_ID);
final JSONObject commenter = userRepository.get(commentAuthorId);
commenter.put(UserExt.USER_COMMENT_COUNT, commenter.optInt(UserExt.USER_COMMENT_COUNT) - 1);
userRepository.update(commentAuthorId, commenter);
commentRepository.remove(comment.optString(Keys.OBJECT_ID));
final JSONObject commentCntOption = optionRepository.get(Option.ID_C_STATISTIC_CMT_COUNT);
commentCntOption.put(Option.OPTION_VALUE, commentCntOption.optInt(Option.OPTION_VALUE) - 1);
optionRepository.update(Option.ID_C_STATISTIC_CMT_COUNT, commentCntOption);
notificationRepository.removeByDataId(commentId);
} catch (final Exception e) {
LOGGER.log(Level.ERROR, "Removes a comment error [id=" + commentId + "]", e);
}
}
/**
* A user specified by the given sender id thanks the author of a comment specified by the given comment id.
*
* @param commentId the given comment id
* @param senderId the given sender id
* @throws ServiceException service exception
*/
public void thankComment(final String commentId, final String senderId) throws ServiceException {
try {
final JSONObject comment = commentRepository.get(commentId);
if (null == comment) {
return;
}
if (Comment.COMMENT_STATUS_C_INVALID == comment.optInt(Comment.COMMENT_STATUS)) {
return;
}
final JSONObject sender = userRepository.get(senderId);
if (null == sender) {
return;
}
if (UserExt.USER_STATUS_C_VALID != sender.optInt(UserExt.USER_STATUS)) {
return;
}
final String receiverId = comment.optString(Comment.COMMENT_AUTHOR_ID);
final JSONObject receiver = userRepository.get(receiverId);
if (null == receiver) {
return;
}
if (UserExt.USER_STATUS_C_VALID != receiver.optInt(UserExt.USER_STATUS)) {
return;
}
if (receiverId.equals(senderId)) {
throw new ServiceException(langPropsService.get("thankSelfLabel"));
}
final int rewardPoint = Symphonys.getInt("pointThankComment");
if (rewardQueryService.isRewarded(senderId, commentId, Reward.TYPE_C_COMMENT)) {
return;
}
final String rewardId = Ids.genTimeMillisId();
if (Comment.COMMENT_ANONYMOUS_C_PUBLIC == comment.optInt(Comment.COMMENT_ANONYMOUS)) {
final boolean succ = null != pointtransferMgmtService.transfer(senderId, receiverId,
Pointtransfer.TRANSFER_TYPE_C_COMMENT_REWARD, rewardPoint, rewardId, System.currentTimeMillis());
if (!succ) {
throw new ServiceException(langPropsService.get("transferFailLabel"));
}
}
final JSONObject reward = new JSONObject();
reward.put(Keys.OBJECT_ID, rewardId);
reward.put(Reward.SENDER_ID, senderId);
reward.put(Reward.DATA_ID, commentId);
reward.put(Reward.TYPE, Reward.TYPE_C_COMMENT);
rewardMgmtService.addReward(reward);
final JSONObject notification = new JSONObject();
notification.put(Notification.NOTIFICATION_USER_ID, receiverId);
notification.put(Notification.NOTIFICATION_DATA_ID, rewardId);
notificationMgmtService.addCommentThankNotification(notification);
livenessMgmtService.incLiveness(senderId, Liveness.LIVENESS_THANK);
} catch (final RepositoryException e) {
LOGGER.log(Level.ERROR, "Thanks a comment[id=" + commentId + "] failed", e);
throw new ServiceException(e);
}
}
/**
* Adds a comment with the specified request json object.
*
* @param requestJSONObject the specified request json object, for example, <pre>
* {
* "commentContent": "",
* "commentAuthorId": "",
* "commentOnArticleId": "",
* "commentOriginalCommentId": "", // optional
* "clientCommentId": "" // optional,
* "commentAuthorName": "" // If from client
* "commenter": {
* // User model
* },
* "commentIP": "", // optional, default to ""
* "commentUA": "", // optional, default to ""
* "commentAnonymous": int, // optional, default to 0 (public)
* "userCommentViewMode": int
* }
* </pre>, see {@link Comment} for more details
*
* @return generated comment id
* @throws ServiceException service exception
*/
public synchronized String addComment(final JSONObject requestJSONObject) throws ServiceException {
final long currentTimeMillis = System.currentTimeMillis();
final JSONObject commenter = requestJSONObject.optJSONObject(Comment.COMMENT_T_COMMENTER);
final String commentAuthorId = requestJSONObject.optString(Comment.COMMENT_AUTHOR_ID);
final boolean fromClient = requestJSONObject.has(Comment.COMMENT_CLIENT_COMMENT_ID);
final String articleId = requestJSONObject.optString(Comment.COMMENT_ON_ARTICLE_ID);
final String ip = requestJSONObject.optString(Comment.COMMENT_IP);
String ua = requestJSONObject.optString(Comment.COMMENT_UA);
final int commentAnonymous = requestJSONObject.optInt(Comment.COMMENT_ANONYMOUS);
final int commentViewMode = requestJSONObject.optInt(UserExt.USER_COMMENT_VIEW_MODE);
if (currentTimeMillis - commenter.optLong(UserExt.USER_LATEST_CMT_TIME) < Symphonys.getLong("minStepCmtTime")
&& !Role.ROLE_ID_C_ADMIN.equals(commenter.optString(User.USER_ROLE))
&& !UserExt.DEFAULT_CMTER_ROLE.equals(commenter.optString(User.USER_ROLE))) {
LOGGER.log(Level.WARN, "Adds comment too frequent [userName={0}]", commenter.optString(User.USER_NAME));
throw new ServiceException(langPropsService.get("tooFrequentCmtLabel"));
}
final String commenterName = commenter.optString(User.USER_NAME);
JSONObject article = null;
try {
// check if admin allow to add comment
final JSONObject option = optionRepository.get(Option.ID_C_MISC_ALLOW_ADD_COMMENT);
if (!"0".equals(option.optString(Option.OPTION_VALUE))) {
throw new ServiceException(langPropsService.get("notAllowAddCommentLabel"));
}
final int balance = commenter.optInt(UserExt.USER_POINT);
if (Comment.COMMENT_ANONYMOUS_C_ANONYMOUS == commentAnonymous) {
final int anonymousPoint = Symphonys.getInt("anonymous.point");
if (balance < anonymousPoint) {
String anonymousEnabelPointLabel = langPropsService.get("anonymousEnabelPointLabel");
anonymousEnabelPointLabel
= anonymousEnabelPointLabel.replace("${point}", String.valueOf(anonymousPoint));
throw new ServiceException(anonymousEnabelPointLabel);
}
}
article = articleRepository.get(articleId);
if (!fromClient && !TuringQueryService.ROBOT_NAME.equals(commenterName)) {
int pointSum = Pointtransfer.TRANSFER_SUM_C_ADD_COMMENT;
// Point
final String articleAuthorId = article.optString(Article.ARTICLE_AUTHOR_ID);
if (articleAuthorId.equals(commentAuthorId)) {
pointSum = Pointtransfer.TRANSFER_SUM_C_ADD_SELF_ARTICLE_COMMENT;
}
if (balance - pointSum < 0) {
throw new ServiceException(langPropsService.get("insufficientBalanceLabel"));
}
}
} catch (final RepositoryException e) {
throw new ServiceException(e);
}
final int articleAnonymous = article.optInt(Article.ARTICLE_ANONYMOUS);
final Transaction transaction = commentRepository.beginTransaction();
try {
article.put(Article.ARTICLE_COMMENT_CNT, article.optInt(Article.ARTICLE_COMMENT_CNT) + 1);
article.put(Article.ARTICLE_LATEST_CMTER_NAME, commenter.optString(User.USER_NAME));
if (Comment.COMMENT_ANONYMOUS_C_ANONYMOUS == commentAnonymous) {
article.put(Article.ARTICLE_LATEST_CMTER_NAME, UserExt.ANONYMOUS_USER_NAME);
}
article.put(Article.ARTICLE_LATEST_CMT_TIME, currentTimeMillis);
final String ret = Ids.genTimeMillisId();
final JSONObject comment = new JSONObject();
comment.put(Keys.OBJECT_ID, ret);
String content = requestJSONObject.optString(Comment.COMMENT_CONTENT).
replace("_esc_enter_88250_", "<br/>"); // Solo client escape
comment.put(Comment.COMMENT_AUTHOR_ID, commentAuthorId);
comment.put(Comment.COMMENT_ON_ARTICLE_ID, articleId);
if (fromClient) {
comment.put(Comment.COMMENT_CLIENT_COMMENT_ID, requestJSONObject.optString(Comment.COMMENT_CLIENT_COMMENT_ID));
// Appends original commenter name
final String authorName = requestJSONObject.optString(Comment.COMMENT_T_AUTHOR_NAME);
content += " <i class='ft-small'>by " + authorName + "</i>";
}
final String originalCmtId = requestJSONObject.optString(Comment.COMMENT_ORIGINAL_COMMENT_ID);
comment.put(Comment.COMMENT_ORIGINAL_COMMENT_ID, originalCmtId);
if (StringUtils.isNotBlank(originalCmtId)) {
final JSONObject originalCmt = commentRepository.get(originalCmtId);
final int originalCmtReplyCnt = originalCmt.optInt(Comment.COMMENT_REPLY_CNT);
originalCmt.put(Comment.COMMENT_REPLY_CNT, originalCmtReplyCnt + 1);
commentRepository.update(originalCmtId, originalCmt);
}
content = Emotions.toAliases(content);
// content = StringUtils.trim(content) + " "; https://github.com/b3log/symphony/issues/389
content = content.replace(langPropsService.get("uploadingLabel", Locale.SIMPLIFIED_CHINESE), "");
content = content.replace(langPropsService.get("uploadingLabel", Locale.US), "");
comment.put(Comment.COMMENT_CONTENT, content);
comment.put(Comment.COMMENT_CREATE_TIME, System.currentTimeMillis());
comment.put(Comment.COMMENT_SHARP_URL, "/article/" + articleId + "#" + ret);
comment.put(Comment.COMMENT_STATUS, Comment.COMMENT_STATUS_C_VALID);
comment.put(Comment.COMMENT_IP, ip);
if (StringUtils.length(ua) > Common.MAX_LENGTH_UA) {
LOGGER.log(Level.WARN, "UA is too long [" + ua + "]");
ua = StringUtils.substring(ua, 0, Common.MAX_LENGTH_UA);
}
comment.put(Comment.COMMENT_UA, ua);
comment.put(Comment.COMMENT_ANONYMOUS, commentAnonymous);
final JSONObject cmtCntOption = optionRepository.get(Option.ID_C_STATISTIC_CMT_COUNT);
final int cmtCnt = cmtCntOption.optInt(Option.OPTION_VALUE);
cmtCntOption.put(Option.OPTION_VALUE, String.valueOf(cmtCnt + 1));
articleRepository.update(articleId, article); // Updates article comment count, latest commenter name and time
optionRepository.update(Option.ID_C_STATISTIC_CMT_COUNT, cmtCntOption); // Updates global comment count
// Updates tag comment count and User-Tag relation
final String tagsString = article.optString(Article.ARTICLE_TAGS);
final String[] tagStrings = tagsString.split(",");
for (int i = 0; i < tagStrings.length; i++) {
final String tagTitle = tagStrings[i].trim();
final JSONObject tag = tagRepository.getByTitle(tagTitle);
tag.put(Tag.TAG_COMMENT_CNT, tag.optInt(Tag.TAG_COMMENT_CNT) + 1);
tag.put(Tag.TAG_RANDOM_DOUBLE, Math.random());
tagRepository.update(tag.optString(Keys.OBJECT_ID), tag);
}
// Updates user comment count, latest comment time
commenter.put(UserExt.USER_COMMENT_COUNT, commenter.optInt(UserExt.USER_COMMENT_COUNT) + 1);
commenter.put(UserExt.USER_LATEST_CMT_TIME, currentTimeMillis);
userRepository.update(commenter.optString(Keys.OBJECT_ID), commenter);
comment.put(Comment.COMMENT_GOOD_CNT, 0);
comment.put(Comment.COMMENT_BAD_CNT, 0);
comment.put(Comment.COMMENT_SCORE, 0D);
comment.put(Comment.COMMENT_REPLY_CNT, 0);
// Adds the comment
final String commentId = commentRepository.add(comment);
// Updates tag-article relation stat.
final List<JSONObject> tagArticleRels = tagArticleRepository.getByArticleId(articleId);
for (final JSONObject tagArticleRel : tagArticleRels) {
tagArticleRel.put(Article.ARTICLE_LATEST_CMT_TIME, currentTimeMillis);
tagArticleRel.put(Article.ARTICLE_COMMENT_CNT, article.optInt(Article.ARTICLE_COMMENT_CNT));
tagArticleRepository.update(tagArticleRel.optString(Keys.OBJECT_ID), tagArticleRel);
}
transaction.commit();
if (!fromClient && Comment.COMMENT_ANONYMOUS_C_PUBLIC == commentAnonymous
&& Article.ARTICLE_ANONYMOUS_C_PUBLIC == articleAnonymous
&& !TuringQueryService.ROBOT_NAME.equals(commenterName)) {
// Point
final String articleAuthorId = article.optString(Article.ARTICLE_AUTHOR_ID);
if (articleAuthorId.equals(commentAuthorId)) {
pointtransferMgmtService.transfer(commentAuthorId, Pointtransfer.ID_C_SYS,
Pointtransfer.TRANSFER_TYPE_C_ADD_COMMENT, Pointtransfer.TRANSFER_SUM_C_ADD_SELF_ARTICLE_COMMENT,
commentId, System.currentTimeMillis());
} else {
pointtransferMgmtService.transfer(commentAuthorId, articleAuthorId,
Pointtransfer.TRANSFER_TYPE_C_ADD_COMMENT, Pointtransfer.TRANSFER_SUM_C_ADD_COMMENT,
commentId, System.currentTimeMillis());
}
livenessMgmtService.incLiveness(commentAuthorId, Liveness.LIVENESS_COMMENT);
}
// Event
final JSONObject eventData = new JSONObject();
eventData.put(Comment.COMMENT, comment);
eventData.put(Common.FROM_CLIENT, fromClient);
eventData.put(Article.ARTICLE, article);
eventData.put(UserExt.USER_COMMENT_VIEW_MODE, commentViewMode);
try {
eventManager.fireEventAsynchronously(new Event<JSONObject>(EventTypes.ADD_COMMENT_TO_ARTICLE, eventData));
} catch (final EventException e) {
LOGGER.log(Level.ERROR, e.getMessage(), e);
}
return ret;
} catch (final RepositoryException e) {
if (transaction.isActive()) {
transaction.rollback();
}
LOGGER.log(Level.ERROR, "Adds a comment failed", e);
throw new ServiceException(e);
}<|fim▁hole|>
/**
* Updates the specified comment by the given comment id.
*
* @param commentId the given comment id
* @param comment the specified comment
* @throws ServiceException service exception
*/
public void updateComment(final String commentId, final JSONObject comment) throws ServiceException {
final Transaction transaction = commentRepository.beginTransaction();
try {
String content = comment.optString(Comment.COMMENT_CONTENT);
content = Emotions.toAliases(content);
content = StringUtils.trim(content) + " ";
content = content.replace(langPropsService.get("uploadingLabel", Locale.SIMPLIFIED_CHINESE), "");
content = content.replace(langPropsService.get("uploadingLabel", Locale.US), "");
comment.put(Comment.COMMENT_CONTENT, content);
commentRepository.update(commentId, comment);
transaction.commit();
} catch (final RepositoryException e) {
if (transaction.isActive()) {
transaction.rollback();
}
LOGGER.log(Level.ERROR, "Updates a comment[id=" + commentId + "] failed", e);
throw new ServiceException(e);
}
}
}<|fim▁end|>
|
}
|
<|file_name|>driver_alpha_tau_study.py<|end_file_name|><|fim▁begin|># test driver to verify that new version of code works
import opiniongame.config as og_cfg
import opiniongame.IO as og_io
import opiniongame.coupling as og_coupling
import opiniongame.state as og_state
import opiniongame.opinions as og_opinions
import opiniongame.adjacency as og_adj
import opiniongame.selection as og_select
import opiniongame.potentials as og_pot
import opiniongame.core as og_core
import opiniongame.stopping as og_stop
import numpy as np
#
# process command line
#
cmdline = og_cfg.CmdLineArguments()
cmdline.printOut()
#
# load configuration
#
# TODO: add option to generate defaults and save to file
# TODO: interpret args to get filename if specified on cmd line
config = og_cfg.staticParameters()
config.readFromFile('staticParameters.cfg')
config.threshold = 0.01
config.printOut()
#
# seed PRNG: must do this before any random numbers are
# ever sampled during default generation
#
print("SEEDING PRNG: "+str(config.startingseed))<|fim▁hole|>
state = og_state.WorldState.fromCmdlineArguments(cmdline, config)
#
# run
#
tau_list = np.arange(0.45, 0.9, 0.01)
alpha_list = np.arange(0.05, 0.25, 0.01)
numalphas = len(alpha_list)
numtaus = len(tau_list)
numvars = 3
resultMatrix = np.zeros((numalphas, numtaus, numvars))
for (i, alpha) in enumerate(alpha_list):
config.learning_rate = alpha
print("")
for (j, tau) in enumerate(tau_list):
print((alpha, tau))
#
# functions for use by the simulation engine
#
ufuncs = og_cfg.UserFunctions(og_select.FastPairSelection,
og_stop.totalChangeStop,
og_pot.createTent(tau))
polarized = 0
notPolarized = 0
aveIters = 0
for k in range(100):
state = og_core.run_until_convergence(config, state, ufuncs)
results = og_opinions.isPolarized(state.history[-1], 0.05)
for result in results:
if result:
polarized += 1
else:
notPolarized += 1
aveIters += state.iterCount
state.reset()
state.initialOpinions = og_opinions.initialize_opinions(config.popSize, config.ntopics)
        # Note: it may be better to classify runs as consensus vs. non-consensus,
        # since finding consensus is easier. Assuming pop_size = 20, ten people at 1,
        # nine at 0 and one at 0.5 still counts as polarization here, which is debatable.
resultMatrix[i][j][0] = polarized
resultMatrix[i][j][1] = notPolarized
resultMatrix[i][j][2] = aveIters/100.0
rdict = {}
rdict['results'] = resultMatrix
og_io.saveMatrix('output.mat', rdict)<|fim▁end|>
|
np.random.seed(config.startingseed)
|
<|file_name|>selenium_helper.py<|end_file_name|><|fim▁begin|>from selenium.webdriver.support.select import Select
def get_selected_option(browser, css_selector):
# Takes a css selector for a <select> element and returns the value of<|fim▁hole|> # the selected option
select = Select(browser.find_element_by_css_selector(css_selector))
return select.first_selected_option.get_attribute('value')<|fim▁end|>
| |
<|file_name|>PageInputBlock.tsx<|end_file_name|><|fim▁begin|>import React, {ChangeEvent, FC} from 'react'
type EntityProps = {
onInput: (e: ChangeEvent<HTMLInputElement>) => any,
label?: string,
error?: string,
placeholder?: string,
pattern?: string,
minlength?: number
}
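// Renders a labelled text input with basic validation feedback: `error`
// switches the input to the `incorrect` class and shows the message below it.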
const PageInputBlock: FC<EntityProps> = (props) => {
const {onInput, label, error, placeholder = '', pattern, minlength = 2} = props
return (
<div className="input-block">
{label && <label>{label}</label>}
<input type='text'<|fim▁hole|> placeholder={placeholder}
minLength={minlength}
maxLength={40}
onInput={onInput}
pattern={pattern}
className={error ? 'incorrect' : ''}
/>
{error && <div className='error'>{error}</div>}
</div>
)
}
export default PageInputBlock<|fim▁end|>
| |
<|file_name|>logging.py<|end_file_name|><|fim▁begin|># standard imports
import os
import logging
import traceback
# Qt imports
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QPlainTextEdit
# toolbox imports
from dltb.util.debug import edit
# GUI imports
from ..utils import protect
# logging
LOG = logging.getLogger(__name__)
class QLogHandler(QPlainTextEdit, logging.Handler):
"""A log handler that displays log messages in a QWidget.
    A :py:class:`QLogHandler` can be used both as a Qt widget and as a
    standard :py:class:`logging.Handler` attached to a logger.
"""
_message_signal = pyqtSignal(str)
def __init__(self, parent=None):
# FIXME[question]: can we use real python multiple inheritance here?<|fim▁hole|> # (that is just super().__init__(*args, **kwargs))
QPlainTextEdit.__init__(self, parent)
logging.Handler.__init__(self)
self.setReadOnly(True)
self._records = []
self._counter = 1
self._message_signal.connect(self.appendMessage)
self._message_signal.emit("Log view initialized")
def __len__(self):
"""The number of lines in this QLogHandler.
"""
return self._counter
def clear(self):
"""Clear this :py:class:QLogHandler.
"""
super().clear()
self._records.clear()
self._counter = 1
self._message_signal.emit("Log view cleared")
@pyqtSlot(str)
def appendMessage(self, message: str):
message = message.replace(os.linesep, '\\n')
self.appendPlainText(message)
self.verticalScrollBar().setValue(self.verticalScrollBar().maximum())
def emit(self, record: logging.LogRecord) -> None:
"""Handle a :py:class:logging.logRecord.
"""
# Here we have to be careful: adding the text directly to the
# widget from another thread causes problems: The program
# crashes with the following message:
# QObject::connect: Cannot queue arguments of type 'QTextBlock'
# (Make sure 'QTextBlock' is registered using qRegisterMetaType().)
# Hence we are doing this via a signal now.
self._counter += 1
self.setToolTip(f"List of log records ({self._counter} entries)")
try:
self._records.append(record)
self._message_signal.emit(self.format(record))
except AttributeError as error:
# FIXME[bug/problem]
# When quitting the program while running some background
# thread (e.g. camera loop), we get the following exception:
# AttributeError: 'QLogHandler' does not have a signal with
# the signature _message_signal(QString)
#print(error)
#print(f" type of record: {type(record)}")
#print(f" record: {record}")
#print(f" signal: {self._message_signal}")
pass
@protect
def mouseReleaseEvent(self, event):
cursor = self.cursorForPosition(event.pos())
block = cursor.blockNumber()
print(block, len(self._records))
if block < len(self._records):
print(self._records[block])
record = self._records[block]
LOG.info(f"Trying to open file {record.pathname}, "
f"line {record.lineno}, in an external editor.")
try:
retcode = edit(record.pathname, record.lineno)
if retcode < 0:
LOG.error("Edit command was terminated by signal "
f"{-retcode}")
else:
LOG.info(f"Edit command returned: {retcode}")
except OSError as error:
LOG.error(f"Edit command failed: {error}")
class QExceptionView(QPlainTextEdit):
"""A view for Python exceptions. This is basically a text field in
which a :py:class:`BaseException` can be displayed, including its
stack trace.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.setReadOnly(True)
self._exception = None
self._traceback = None
def setException(self, exception: BaseException) -> None:
"""Set the :py:class:`BaseException` to be displayed in this
:py:class:`QExceptionView`
"""
self._exception = exception
self._traceback = traceback.extract_tb(exception.__traceback__)
        # _traceback is basically a list of traceback.FrameSummary,
# each providing the following attributes:
# - filename
# - line
# - lineno
# - locals
# - name
self.clear()
for m in traceback.format_list(self._traceback):
self.appendPlainText(m)
self.verticalScrollBar().setValue(self.verticalScrollBar().maximum())
@protect
def mouseReleaseEvent(self, event) -> None:
"""Handle a mouse release event. When pressed on a frame in the stack
        trace, open the corresponding code line in an external editor.
"""
cursor = self.cursorForPosition(event.pos())
frame_number = cursor.blockNumber() // 2
if self._traceback is not None and frame_number < len(self._traceback):
self.editFrame(self._traceback[frame_number])
def editFrame(self, frame: traceback.FrameSummary):
"""Edit the the code file described by the given stack frame in an
external editor.
"""
LOG.info(f"Trying to open file {frame.filename}, "
f"line {frame.lineno}, in an external editor.")
try:
retcode = edit(frame.filename, frame.lineno)
if retcode < 0:
LOG.error("Edit command was terminated by signal "
f"{-retcode}")
else:
LOG.info(f"Edit command returned: {retcode}"
f"({'error' if retcode else 'success'})")
except OSError as error:
LOG.error(f"Edit command failed: {error}")<|fim▁end|>
| |
<|file_name|>proxy_test.go<|end_file_name|><|fim▁begin|>// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium
//go:build privileged_tests
// +build privileged_tests
package dnsproxy
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"sync"
"testing"
"time"
"github.com/golang/groupcache/lru"
"github.com/miekg/dns"
. "gopkg.in/check.v1"
"github.com/cilium/cilium/pkg/addressing"
"github.com/cilium/cilium/pkg/checker"
"github.com/cilium/cilium/pkg/completion"
"github.com/cilium/cilium/pkg/datapath"
"github.com/cilium/cilium/pkg/endpoint"
"github.com/cilium/cilium/pkg/endpoint/regeneration"
"github.com/cilium/cilium/pkg/fqdn/restore"
"github.com/cilium/cilium/pkg/identity"
"github.com/cilium/cilium/pkg/identity/cache"
"github.com/cilium/cilium/pkg/ipcache"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/policy"
"github.com/cilium/cilium/pkg/policy/api"
"github.com/cilium/cilium/pkg/revert"
"github.com/cilium/cilium/pkg/source"
testidentity "github.com/cilium/cilium/pkg/testutils/identity"
)
// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) {
TestingT(t)
}
type DNSProxyTestSuite struct {
repo *policy.Repository
dnsTCPClient *dns.Client
dnsServer *dns.Server
proxy *DNSProxy
restoring bool
}
func (s *DNSProxyTestSuite) GetPolicyRepository() *policy.Repository {
return s.repo
}
func (s *DNSProxyTestSuite) GetProxyPort(l7Type policy.L7ParserType, ingress bool) (uint16, string, error) {
return 0, "", nil
}
func (s *DNSProxyTestSuite) UpdateProxyRedirect(e regeneration.EndpointUpdater, l4 *policy.L4Filter, wg *completion.WaitGroup) (uint16, error, revert.FinalizeFunc, revert.RevertFunc) {
return 0, nil, nil, nil
}
func (s *DNSProxyTestSuite) RemoveProxyRedirect(e regeneration.EndpointInfoSource, id string, wg *completion.WaitGroup) (error, revert.FinalizeFunc, revert.RevertFunc) {
return nil, nil, nil
}
func (s *DNSProxyTestSuite) UpdateNetworkPolicy(e regeneration.EndpointUpdater, vis *policy.VisibilityPolicy, policy *policy.L4Policy,
proxyWaitGroup *completion.WaitGroup) (error, revert.RevertFunc) {
return nil, nil
}
func (s *DNSProxyTestSuite) RemoveNetworkPolicy(e regeneration.EndpointInfoSource) {}
func (s *DNSProxyTestSuite) QueueEndpointBuild(ctx context.Context, epID uint64) (func(), error) {
return nil, nil
}
func (s *DNSProxyTestSuite) GetCompilationLock() *lock.RWMutex {
return nil
}
func (s *DNSProxyTestSuite) GetCIDRPrefixLengths() (s6, s4 []int) {
return nil, nil
}
func (s *DNSProxyTestSuite) SendNotification(msg monitorAPI.AgentNotifyMessage) error {
return nil
}
func (s *DNSProxyTestSuite) Datapath() datapath.Datapath {
return nil
}
func (s *DNSProxyTestSuite) GetDNSRules(epID uint16) restore.DNSRules {
return nil
}
func (s *DNSProxyTestSuite) RemoveRestoredDNSRules(epID uint16) {
}
var _ = Suite(&DNSProxyTestSuite{})
func setupServer(c *C) (dnsServer *dns.Server) {
waitOnListen := make(chan struct{})
dnsServer = &dns.Server{Addr: ":0", Net: "tcp", NotifyStartedFunc: func() { close(waitOnListen) }}
go dnsServer.ListenAndServe()
dns.HandleFunc(".", serveDNS)
select {
case <-waitOnListen:
return dnsServer
case <-time.After(10 * time.Second):
c.Error("DNS server did not start listening")
}
return nil
}
func teardown(dnsServer *dns.Server) {
dnsServer.Listener.Close()
}
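// serveDNS replies to every query with a single fixed A record (1.1.1.1),
// so tests get a deterministic answer regardless of the name asked for.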
func serveDNS(w dns.ResponseWriter, r *dns.Msg) {
m := new(dns.Msg)
m.SetReply(r)
retARR, err := dns.NewRR(m.Question[0].Name + " 60 IN A 1.1.1.1")
if err != nil {
panic(err)
}
m.Answer = append(m.Answer, retARR)
w.WriteMsg(m)
}
type DummySelectorCacheUser struct{}
func (d *DummySelectorCacheUser) IdentitySelectionUpdated(selector policy.CachedSelector, added, deleted []identity.NumericIdentity) {
}
// Setup identities, ports and endpoint IDs we will need
var (
cacheAllocator = cache.NewCachingIdentityAllocator(&testidentity.IdentityAllocatorOwnerMock{})
fakeAllocator = testidentity.NewMockIdentityAllocator(cacheAllocator.GetIdentityCache())
testSelectorCache = policy.NewSelectorCache(fakeAllocator, cacheAllocator.GetIdentityCache())
dummySelectorCacheUser = &DummySelectorCacheUser{}
DstID1Selector = api.NewESFromLabels(labels.ParseSelectLabel("k8s:Dst1=test"))
cachedDstID1Selector, _ = testSelectorCache.AddIdentitySelector(dummySelectorCacheUser, DstID1Selector)
DstID2Selector = api.NewESFromLabels(labels.ParseSelectLabel("k8s:Dst2=test"))
cachedDstID2Selector, _ = testSelectorCache.AddIdentitySelector(dummySelectorCacheUser, DstID2Selector)
DstID3Selector = api.NewESFromLabels(labels.ParseSelectLabel("k8s:Dst3=test"))
cachedDstID3Selector, _ = testSelectorCache.AddIdentitySelector(dummySelectorCacheUser, DstID3Selector)
DstID4Selector = api.NewESFromLabels(labels.ParseSelectLabel("k8s:Dst4=test"))
cachedDstID4Selector, _ = testSelectorCache.AddIdentitySelector(dummySelectorCacheUser, DstID4Selector)
cachedWildcardSelector, _ = testSelectorCache.AddIdentitySelector(dummySelectorCacheUser, api.WildcardEndpointSelector)
epID1 = uint64(111)
epID2 = uint64(222)
epID3 = uint64(333)
dstID1 = identity.NumericIdentity(1001)
dstID2 = identity.NumericIdentity(2002)
dstID3 = identity.NumericIdentity(3003)
dstID4 = identity.NumericIdentity(4004)
	dstPort = uint16(53) // Set below when we set up the server!
)
func (s *DNSProxyTestSuite) SetUpTest(c *C) {
// Add these identities
wg := &sync.WaitGroup{}
testSelectorCache.UpdateIdentities(cache.IdentityCache{
dstID1: labels.Labels{"Dst1": labels.NewLabel("Dst1", "test", labels.LabelSourceK8s)}.LabelArray(),
dstID2: labels.Labels{"Dst2": labels.NewLabel("Dst2", "test", labels.LabelSourceK8s)}.LabelArray(),
dstID3: labels.Labels{"Dst3": labels.NewLabel("Dst3", "test", labels.LabelSourceK8s)}.LabelArray(),
dstID4: labels.Labels{"Dst4": labels.NewLabel("Dst4", "test", labels.LabelSourceK8s)}.LabelArray(),
}, nil, wg)
wg.Wait()
s.repo = policy.NewPolicyRepository(nil, nil, nil)
s.dnsTCPClient = &dns.Client{Net: "tcp", Timeout: time.Second, SingleInflight: true}
s.dnsServer = setupServer(c)
c.Assert(s.dnsServer, Not(IsNil), Commentf("unable to setup DNS server"))
proxy, err := StartDNSProxy("", 0, true, 1000, // any address, any port, enable compression, max 1000 restore IPs
// LookupEPByIP
func(ip net.IP) (*endpoint.Endpoint, error) {
if s.restoring {
return nil, fmt.Errorf("No EPs available when restoring")
}
return endpoint.NewEndpointWithState(s, s, &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), uint16(epID1), endpoint.StateReady), nil
},
// LookupSecIDByIP
func(ip net.IP) (ipcache.Identity, bool) {
DNSServerListenerAddr := (s.dnsServer.Listener.Addr()).(*net.TCPAddr)
switch {
case ip.String() == DNSServerListenerAddr.IP.String():
ident := ipcache.Identity{
ID: dstID1,
Source: source.Unspec}
return ident, true
default:
ident := ipcache.Identity{
ID: dstID2,
Source: source.Unspec}
return ident, true
}
},
// LookupIPsBySecID
func(nid identity.NumericIdentity) []string {
DNSServerListenerAddr := (s.dnsServer.Listener.Addr()).(*net.TCPAddr)
switch nid {
case dstID1:
return []string{DNSServerListenerAddr.IP.String()}
case dstID2:
return []string{"127.0.0.1", "127.0.0.2"}
default:
return nil
}
},
// NotifyOnDNSMsg
func(lookupTime time.Time, ep *endpoint.Endpoint, epIPPort string, dstAddr string, msg *dns.Msg, protocol string, allowed bool, stat *ProxyRequestContext) error {
return nil
})
c.Assert(err, IsNil, Commentf("error starting DNS Proxy"))
s.proxy = proxy
	// This is here because Listener or Listener.Addr() was nil. The
// lookupTargetDNSServer function doesn't need to change the target.
c.Assert(s.dnsServer.Listener, Not(IsNil), Commentf("DNS server missing a Listener"))
DNSServerListenerAddr := (s.dnsServer.Listener.Addr()).(*net.TCPAddr)
c.Assert(DNSServerListenerAddr, Not(IsNil), Commentf("DNS server missing a Listener address"))
s.proxy.lookupTargetDNSServer = func(w dns.ResponseWriter) (serverIP net.IP, serverPort uint16, addrStr string, err error) {
return DNSServerListenerAddr.IP, uint16(DNSServerListenerAddr.Port), DNSServerListenerAddr.String(), nil
}
dstPort = uint16(DNSServerListenerAddr.Port)
}
func (s *DNSProxyTestSuite) TearDownTest(c *C) {
s.proxy.allowed = make(perEPAllow)
s.proxy.SetRejectReply(option.FQDNProxyDenyWithRefused)
s.dnsServer.Listener.Close()
s.proxy.UDPServer.Shutdown()
s.proxy.TCPServer.Shutdown()
}
func (s *DNSProxyTestSuite) TestRejectFromDifferentEndpoint(c *C) {
name := "cilium.io."
l7map := policy.L7DataMap{
cachedDstID1Selector: &policy.PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{{MatchName: name}},
},
},
}
query := name
// Reject a query from not endpoint 1
err := s.proxy.UpdateAllowed(epID1, dstPort, l7map)
c.Assert(err, Equals, nil, Commentf("Could not update with rules"))
allowed, err := s.proxy.CheckAllowed(epID2, dstPort, dstID1, nil, query)
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was not rejected when it should be blocked"))
}
func (s *DNSProxyTestSuite) TestAcceptFromMatchingEndpoint(c *C) {
name := "cilium.io."
l7map := policy.L7DataMap{
cachedDstID1Selector: &policy.PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{{MatchName: name}},
},
},
}
query := name
// accept a query that matches from endpoint1
err := s.proxy.UpdateAllowed(epID1, dstPort, l7map)
c.Assert(err, Equals, nil, Commentf("Could not update with rules"))
allowed, err := s.proxy.CheckAllowed(epID1, dstPort, dstID1, nil, query)
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
}
func (s *DNSProxyTestSuite) TestAcceptNonRegex(c *C) {
name := "simple.io."
l7map := policy.L7DataMap{
cachedDstID1Selector: &policy.PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{{MatchName: name}},
},
},
}
query := name
// accept a query that matches from endpoint1
err := s.proxy.UpdateAllowed(epID1, dstPort, l7map)
c.Assert(err, Equals, nil, Commentf("Could not update with rules"))
allowed, err := s.proxy.CheckAllowed(epID1, dstPort, dstID1, nil, query)
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
}
func (s *DNSProxyTestSuite) TestRejectNonRegex(c *C) {
name := "cilium.io."
l7map := policy.L7DataMap{
cachedDstID1Selector: &policy.PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{{MatchName: name}},
},
},
}
query := "ciliumXio."
// reject a query for a non-regex where a . is different (i.e. ensure simple FQDNs treat . as .)
err := s.proxy.UpdateAllowed(epID1, dstPort, l7map)
c.Assert(err, Equals, nil, Commentf("Could not update with rules"))
allowed, err := s.proxy.CheckAllowed(epID1, dstPort, dstID1, nil, query)
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was not rejected when it should be blocked"))
}
func (s *DNSProxyTestSuite) requestRejectNonMatchingRefusedResponse(c *C) *dns.Msg {
name := "cilium.io."
l7map := policy.L7DataMap{
cachedDstID1Selector: &policy.PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{{MatchName: name}},
},
},
}
query := "notcilium.io."
err := s.proxy.UpdateAllowed(epID1, dstPort, l7map)
c.Assert(err, Equals, nil, Commentf("Could not update with rules"))
allowed, err := s.proxy.CheckAllowed(epID1, dstPort, dstID1, nil, query)
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was not rejected when it should be blocked"))
request := new(dns.Msg)
request.SetQuestion(query, dns.TypeA)
return request
}
func (s *DNSProxyTestSuite) TestRejectNonMatchingRefusedResponseWithNameError(c *C) {
request := s.requestRejectNonMatchingRefusedResponse(c)
// reject a query with NXDomain
s.proxy.SetRejectReply(option.FQDNProxyDenyWithNameError)
response, _, err := s.dnsTCPClient.Exchange(request, s.proxy.TCPServer.Listener.Addr().String())
c.Assert(err, IsNil, Commentf("DNS request from test client failed when it should succeed"))
c.Assert(response.Rcode, Equals, dns.RcodeNameError, Commentf("DNS request from test client was not rejected when it should be blocked"))
}
func (s *DNSProxyTestSuite) TestRejectNonMatchingRefusedResponseWithRefused(c *C) {
request := s.requestRejectNonMatchingRefusedResponse(c)
// reject a query with Refused
s.proxy.SetRejectReply(option.FQDNProxyDenyWithRefused)
response, _, err := s.dnsTCPClient.Exchange(request, s.proxy.TCPServer.Listener.Addr().String())
c.Assert(err, IsNil, Commentf("DNS request from test client failed when it should succeed"))
c.Assert(response.Rcode, Equals, dns.RcodeRefused, Commentf("DNS request from test client was not rejected when it should be blocked"))
}
func (s *DNSProxyTestSuite) TestRespondViaCorrectProtocol(c *C) {
// Respond with an actual answer for the query. This also tests that the
// connection was forwarded via the correct protocol (tcp/udp) because we
	// connect with TCP, and the server only listens on TCP.
name := "cilium.io."
l7map := policy.L7DataMap{
cachedDstID1Selector: &policy.PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{{MatchName: name}},
},
},
}
query := name
err := s.proxy.UpdateAllowed(epID1, dstPort, l7map)
c.Assert(err, Equals, nil, Commentf("Could not update with rules"))
allowed, err := s.proxy.CheckAllowed(epID1, dstPort, dstID1, nil, query)
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
request := new(dns.Msg)
request.SetQuestion(query, dns.TypeA)
response, rtt, err := s.dnsTCPClient.Exchange(request, s.proxy.TCPServer.Listener.Addr().String())
c.Assert(err, IsNil, Commentf("DNS request from test client failed when it should succeed (RTT: %v)", rtt))
c.Assert(len(response.Answer), Equals, 1, Commentf("Proxy returned incorrect number of answer RRs %s", response))
c.Assert(response.Answer[0].String(), Equals, "cilium.io.\t60\tIN\tA\t1.1.1.1", Commentf("Proxy returned incorrect RRs"))
}
func (s *DNSProxyTestSuite) TestRespondMixedCaseInRequestResponse(c *C) {
// Test that mixed case query is allowed out and then back in to support
	// high-order-bit query uniquing schemes (and a data exfiltration
// vector :( )
name := "cilium.io."
l7map := policy.L7DataMap{
cachedDstID1Selector: &policy.PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{{MatchName: name}},
},
},
}
query := "CILIUM.io."
err := s.proxy.UpdateAllowed(epID1, dstPort, l7map)
c.Assert(err, Equals, nil, Commentf("Could not update with rules"))
allowed, err := s.proxy.CheckAllowed(epID1, dstPort, dstID1, nil, query)
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
request := new(dns.Msg)
request.SetQuestion(query, dns.TypeA)
response, _, err := s.dnsTCPClient.Exchange(request, s.proxy.TCPServer.Listener.Addr().String())
c.Assert(err, IsNil, Commentf("DNS request from test client failed when it should succeed"))
c.Assert(len(response.Answer), Equals, 1, Commentf("Proxy returned incorrect number of answer RRs %s", response))
c.Assert(response.Answer[0].String(), Equals, "CILIUM.io.\t60\tIN\tA\t1.1.1.1", Commentf("Proxy returned incorrect RRs"))
request.SetQuestion("ciliuM.io.", dns.TypeA)
response, _, err = s.dnsTCPClient.Exchange(request, s.proxy.TCPServer.Listener.Addr().String())
c.Assert(err, IsNil, Commentf("DNS request from test client failed when it should succeed"))
c.Assert(len(response.Answer), Equals, 1, Commentf("Proxy returned incorrect number of answer RRs %+v", response.Answer))
c.Assert(response.Answer[0].String(), Equals, "ciliuM.io.\t60\tIN\tA\t1.1.1.1", Commentf("Proxy returned incorrect RRs"))
}
func (s *DNSProxyTestSuite) TestCheckAllowedTwiceRemovedOnce(c *C) {
name := "cilium.io."
l7map := policy.L7DataMap{
cachedDstID1Selector: &policy.PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{{MatchName: name}},
},
},
}
query := name
// Add the rule twice
err := s.proxy.UpdateAllowed(epID1, dstPort, l7map)
c.Assert(err, Equals, nil, Commentf("Could not update with rules"))
err = s.proxy.UpdateAllowed(epID1, dstPort, l7map)
c.Assert(err, Equals, nil, Commentf("Could not update with rules"))
allowed, err := s.proxy.CheckAllowed(epID1, dstPort, dstID1, nil, query)
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
// Delete once, it should reject
err = s.proxy.UpdateAllowed(epID1, dstPort, nil)
c.Assert(err, Equals, nil, Commentf("Could not update with rules"))
allowed, err = s.proxy.CheckAllowed(epID1, dstPort, dstID1, nil, query)
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Delete once, it should reject and not crash
err = s.proxy.UpdateAllowed(epID1, dstPort, nil)
c.Assert(err, Equals, nil, Commentf("Could not update with rules"))
allowed, err = s.proxy.CheckAllowed(epID1, dstPort, dstID1, nil, query)
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
}
func (s *DNSProxyTestSuite) TestFullPathDependence(c *C) {
// Test that we consider each of endpoint ID, destination SecID (via the
// selector in L7DataMap), destination port (set in the redirect itself) and
// the DNS name.
// The rules approximate:
// +------+--------+---------+----------------+
// | From | To | DstPort | DNSNames |
// +======+========+=========+================+
// | EP1 | DstID1 | 53 | *.ubuntu.com |
// | EP1 | DstID1 | 53 | aws.amazon.com |
// | EP1 | DstID2 | 53 | cilium.io |
// | EP1 | * | 54 | example.com |
// | EP3 | DstID1 | 53 | example.com |
// | EP3 | DstID3 | 53 | * |
// | EP3 | DstID4 | 53 | nil |
// +------+--------+---------+----------------+
//
// Cases:
// +------+-------+--------+------+----------------+----------+----------------------------------------------------------------+
// | Case | From | To | Port | Query | Outcome | Reason |
// +------+-------+--------+------+----------------+----------+----------------------------------------------------------------+
// | 1 | EPID1 | DstID1 | 53 | www.ubuntu.com | Allowed | |
// | 2 | EPID1 | DstID1 | 54 | cilium.io | Rejected | Port 54 only allows example.com |
// | 3 | EPID1 | DstID2 | 53 | cilium.io | Allowed | |
// | 4 | EPID1 | DstID2 | 53 | aws.amazon.com | Rejected | Only cilium.io is allowed with DstID2 |
// | 5 | EPID1 | DstID1 | 54 | example.com | Allowed | |
// | 6 | EPID2 | DstID1 | 53 | cilium.io | Rejected | EPID2 is not allowed as a source by any policy |
// | 7 | EPID3 | DstID1 | 53 | example.com | Allowed | |
// | 8 | EPID3 | DstID1 | 53 | aws.amazon.com | Rejected | EPID3 is only allowed to ask DstID1 on Port 53 for example.com |
// | 8 | EPID3 | DstID1 | 54 | example.com | Rejected | EPID3 is only allowed to ask DstID1 on Port 53 for example.com |
// | 9 | EPID3 | DstID2 | 53 | example.com | Rejected | EPID3 is only allowed to ask DstID1 on Port 53 for example.com |
// | 10 | EPID3 | DstID3 | 53 | example.com | Allowed | Allowed due to wildcard match pattern |
// | 11 | EPID3 | DstID4 | 53 | example.com | Allowed | Allowed due to a nil rule |
// +------+-------+--------+------+----------------+----------+----------------------------------------------------------------+
// Setup rules
// | EP1 | DstID1 | 53 | *.ubuntu.com |
// | EP1 | DstID1 | 53 | aws.amazon.com |
// | EP1 | DstID2 | 53 | cilium.io |
err := s.proxy.UpdateAllowed(epID1, 53, policy.L7DataMap{
cachedDstID1Selector: &policy.PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{
{MatchPattern: "*.ubuntu.com."},
{MatchPattern: "aws.amazon.com."},
},
},
},
cachedDstID2Selector: &policy.PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{
{MatchPattern: "cilium.io."},
},
},
},
})
c.Assert(err, Equals, nil, Commentf("Could not update with port 53 rules"))
// | EP1 | DstID1 | 54 | example.com |
err = s.proxy.UpdateAllowed(epID1, 54, policy.L7DataMap{
cachedWildcardSelector: &policy.PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{
{MatchPattern: "example.com."},
},
},
},
})
c.Assert(err, Equals, nil, Commentf("Could not update with rules"))
// | EP3 | DstID1 | 53 | example.com |
// | EP3 | DstID3 | 53 | * |
// | EP3 | DstID4 | 53 | nil |
err = s.proxy.UpdateAllowed(epID3, 53, policy.L7DataMap{
cachedDstID1Selector: &policy.PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{
{MatchPattern: "example.com."},
},
},
},
cachedDstID3Selector: &policy.PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{
{MatchPattern: "*"},
},
},
},
cachedDstID4Selector: nil,
})
c.Assert(err, Equals, nil, Commentf("Could not update with rules"))
// Test cases
// Case 1 | EPID1 | DstID1 | 53 | www.ubuntu.com | Allowed
allowed, err := s.proxy.CheckAllowed(epID1, 53, dstID1, nil, "www.ubuntu.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
// Case 2 | EPID1 | DstID1 | 54 | cilium.io | Rejected | Port 54 only allows example.com
allowed, err = s.proxy.CheckAllowed(epID1, 54, dstID1, nil, "cilium.io")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 3 | EPID1 | DstID2 | 53 | cilium.io | Allowed
allowed, err = s.proxy.CheckAllowed(epID1, 53, dstID2, nil, "cilium.io")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
// Case 4 | EPID1 | DstID2 | 53 | aws.amazon.com | Rejected | Only cilium.io is allowed with DstID2
allowed, err = s.proxy.CheckAllowed(epID1, 53, dstID2, nil, "aws.amazon.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 5 | EPID1 | DstID1 | 54 | example.com | Allowed
allowed, err = s.proxy.CheckAllowed(epID1, 54, dstID1, nil, "example.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
// Case 6 | EPID2 | DstID1 | 53 | cilium.io | Rejected | EPID2 is not allowed as a source by any policy
allowed, err = s.proxy.CheckAllowed(epID2, 53, dstID1, nil, "cilium.io")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 7 | EPID3 | DstID1 | 53 | example.com | Allowed
allowed, err = s.proxy.CheckAllowed(epID3, 53, dstID1, nil, "example.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
// Case 8 | EPID3 | DstID1 | 53 | aws.amazon.com | Rejected | EPID3 is only allowed to ask DstID1 on Port 53 for example.com
	allowed, err = s.proxy.CheckAllowed(epID3, 53, dstID1, nil, "aws.amazon.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))<|fim▁hole|> c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 9 | EPID3 | DstID2 | 53 | example.com | Rejected | EPID3 is only allowed to ask DstID1 on Port 53 for example.com
allowed, err = s.proxy.CheckAllowed(epID3, 53, dstID2, nil, "example.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 10 | EPID3 | DstID3 | 53 | example.com | Allowed due to wildcard match pattern
allowed, err = s.proxy.CheckAllowed(epID3, 53, dstID3, nil, "example.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
// Case 11 | EPID3 | DstID4 | 53 | example.com | Allowed due to a nil rule
allowed, err = s.proxy.CheckAllowed(epID3, 53, dstID4, nil, "example.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
// Get rules for restoration
expected1 := restore.DNSRules{
53: restore.IPRules{{
IPs: map[string]struct{}{"::": {}},
Re: restore.RuleRegex{Regexp: s.proxy.allowed[epID1][53][cachedDstID1Selector]},
}, {
IPs: map[string]struct{}{"127.0.0.1": {}, "127.0.0.2": {}},
Re: restore.RuleRegex{Regexp: s.proxy.allowed[epID1][53][cachedDstID2Selector]},
}}.Sort(),
54: restore.IPRules{{
Re: restore.RuleRegex{Regexp: s.proxy.allowed[epID1][54][cachedWildcardSelector]},
}},
}
restored1, _ := s.proxy.GetRules(uint16(epID1))
restored1.Sort()
c.Assert(restored1, checker.DeepEquals, expected1)
expected2 := restore.DNSRules{}
restored2, _ := s.proxy.GetRules(uint16(epID2))
restored2.Sort()
c.Assert(restored2, checker.DeepEquals, expected2)
expected3 := restore.DNSRules{
53: restore.IPRules{{
IPs: map[string]struct{}{"::": {}},
Re: restore.RuleRegex{Regexp: s.proxy.allowed[epID3][53][cachedDstID1Selector]},
}, {
IPs: map[string]struct{}{},
Re: restore.RuleRegex{Regexp: s.proxy.allowed[epID3][53][cachedDstID3Selector]},
}, {
IPs: map[string]struct{}{},
Re: restore.RuleRegex{Regexp: s.proxy.allowed[epID3][53][cachedDstID4Selector]},
}}.Sort(),
}
restored3, _ := s.proxy.GetRules(uint16(epID3))
restored3.Sort()
c.Assert(restored3, checker.DeepEquals, expected3)
// Test with limited set of allowed IPs
oldUsed := s.proxy.usedServers
s.proxy.usedServers = map[string]struct{}{"127.0.0.2": {}}
expected1b := restore.DNSRules{
53: restore.IPRules{{
IPs: map[string]struct{}{},
Re: restore.RuleRegex{Regexp: s.proxy.allowed[epID1][53][cachedDstID1Selector]},
}, {
IPs: map[string]struct{}{"127.0.0.2": {}},
Re: restore.RuleRegex{Regexp: s.proxy.allowed[epID1][53][cachedDstID2Selector]},
}}.Sort(),
54: restore.IPRules{{
Re: restore.RuleRegex{Regexp: s.proxy.allowed[epID1][54][cachedWildcardSelector]},
}},
}
restored1b, _ := s.proxy.GetRules(uint16(epID1))
restored1b.Sort()
c.Assert(restored1b, checker.DeepEquals, expected1b)
// unlimited again
s.proxy.usedServers = oldUsed
s.proxy.UpdateAllowed(epID1, 53, nil)
s.proxy.UpdateAllowed(epID1, 54, nil)
_, exists := s.proxy.allowed[epID1]
c.Assert(exists, Equals, false)
_, exists = s.proxy.allowed[epID2]
c.Assert(exists, Equals, false)
s.proxy.UpdateAllowed(epID3, 53, nil)
_, exists = s.proxy.allowed[epID3]
c.Assert(exists, Equals, false)
dstIP1 := (s.dnsServer.Listener.Addr()).(*net.TCPAddr).IP
dstIP2a := net.ParseIP("127.0.0.1")
dstIP2b := net.ParseIP("127.0.0.2")
dstIPrandom := net.ParseIP("127.0.0.42")
// Before restore: all rules removed above, everything is dropped
// Case 1 | EPID1 | DstID1 | 53 | www.ubuntu.com | Rejected | No rules
allowed, err = s.proxy.CheckAllowed(epID1, 53, dstID1, dstIP1, "www.ubuntu.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 2 | EPID1 | DstID1 | 54 | cilium.io | Rejected | No rules
allowed, err = s.proxy.CheckAllowed(epID1, 54, dstID1, dstIP1, "cilium.io")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 3 | EPID1 | DstID2 | 53 | cilium.io | Rejected | No rules
allowed, err = s.proxy.CheckAllowed(epID1, 53, dstID2, dstIP2a, "cilium.io")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 4 | EPID1 | DstID2 | 53 | aws.amazon.com | Rejected | No rules
allowed, err = s.proxy.CheckAllowed(epID1, 53, dstID2, dstIP2b, "aws.amazon.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 5 | EPID1 | DstID1 | 54 | example.com | Rejected | No rules
allowed, err = s.proxy.CheckAllowed(epID1, 54, dstID1, dstIP1, "example.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Restore rules
ep1 := endpoint.NewEndpointWithState(s, s, &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), uint16(epID1), endpoint.StateReady)
ep1.DNSRules = restored1
s.proxy.RestoreRules(ep1)
_, exists = s.proxy.restored[epID1]
c.Assert(exists, Equals, true)
// Same tests with 2 (WORLD) dstID to make sure it is not used, but with correct destination IP
// Case 1 | EPID1 | dstIP1 | 53 | www.ubuntu.com | Allowed due to restored rules
allowed, err = s.proxy.CheckAllowed(epID1, 53, 2, dstIP1, "www.ubuntu.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
// Case 2 | EPID1 | dstIP1 | 54 | cilium.io | Rejected due to restored rules | Port 54 only allows example.com
allowed, err = s.proxy.CheckAllowed(epID1, 54, 2, dstIP1, "cilium.io")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 3 | EPID1 | dstIP2a | 53 | cilium.io | Allowed due to restored rules
allowed, err = s.proxy.CheckAllowed(epID1, 53, 2, dstIP2a, "cilium.io")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
// Case 4 | EPID1 | dstIP2b | 53 | aws.amazon.com | Rejected due to restored rules | Only cilium.io is allowed with DstID2
allowed, err = s.proxy.CheckAllowed(epID1, 53, 2, dstIP2b, "aws.amazon.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 5 | EPID1 | dstIP1 | 54 | example.com | Allowed due to restored rules
allowed, err = s.proxy.CheckAllowed(epID1, 54, 2, dstIP1, "example.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
// make sure random IP is not allowed
// Case 5 | EPID1 | random IP | 53 | example.com | Rejected due to restored rules
allowed, err = s.proxy.CheckAllowed(epID1, 53, 2, dstIPrandom, "example.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// make sure random destination IP is allowed in a wildcard selector
// Case 5 | EPID1 | random IP | 54 | example.com | Allowed due to restored rules
allowed, err = s.proxy.CheckAllowed(epID1, 54, 2, dstIPrandom, "example.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
// Restore rules for epID3
ep3 := endpoint.NewEndpointWithState(s, s, &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), uint16(epID3), endpoint.StateReady)
ep3.DNSRules = restored3
s.proxy.RestoreRules(ep3)
_, exists = s.proxy.restored[epID3]
c.Assert(exists, Equals, true)
// Set empty ruleset, check that restored rules were deleted in epID3
err = s.proxy.UpdateAllowed(epID3, 53, nil)
c.Assert(err, Equals, nil, Commentf("Could not update with rules"))
_, exists = s.proxy.restored[epID3]
c.Assert(exists, Equals, false)
// epID1 still has restored rules
_, exists = s.proxy.restored[epID1]
c.Assert(exists, Equals, true)
// Marshal restored rules to JSON
jsn, err := json.Marshal(s.proxy.restored[epID1])
c.Assert(err, Equals, nil, Commentf("Could not marshal restored rules to json"))
expected := `
{
"53": [{
"Re": "(^[-a-zA-Z0-9_]*[.]ubuntu[.]com[.]$)|(^aws[.]amazon[.]com[.]$)",
"IPs": {"::": {}}
}, {
"Re": "(^cilium[.]io[.]$)",
"IPs": {"127.0.0.1": {}, "127.0.0.2": {}}
}],
"54": [{
"Re": "(^example[.]com[.]$)",
"IPs": null
}]
}`
pretty := new(bytes.Buffer)
err = json.Compact(pretty, []byte(expected))
c.Assert(err, Equals, nil, Commentf("Could not compact expected json"))
c.Assert(string(jsn), Equals, pretty.String())
s.proxy.RemoveRestoredRules(uint16(epID1))
_, exists = s.proxy.restored[epID1]
c.Assert(exists, Equals, false)
// Before restore after marshal: previous restored rules are removed, everything is dropped
// Case 1 | EPID1 | DstID1 | 53 | www.ubuntu.com | Rejected | No rules
allowed, err = s.proxy.CheckAllowed(epID1, 53, dstID1, dstIP1, "www.ubuntu.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 2 | EPID1 | DstID1 | 54 | cilium.io | Rejected | No rules
allowed, err = s.proxy.CheckAllowed(epID1, 54, dstID1, dstIP1, "cilium.io")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 3 | EPID1 | DstID2 | 53 | cilium.io | Rejected | No rules
allowed, err = s.proxy.CheckAllowed(epID1, 53, dstID2, dstIP2a, "cilium.io")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 4 | EPID1 | DstID2 | 53 | aws.amazon.com | Rejected | No rules
allowed, err = s.proxy.CheckAllowed(epID1, 53, dstID2, dstIP2b, "aws.amazon.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 5 | EPID1 | DstID1 | 54 | example.com | Rejected | No rules
allowed, err = s.proxy.CheckAllowed(epID1, 54, dstID1, dstIP1, "example.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 5 | EPID1 | random IP | 54 | example.com | Rejected
allowed, err = s.proxy.CheckAllowed(epID1, 54, 2, dstIPrandom, "example.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Restore Unmarshaled rules
var rules restore.DNSRules
err = json.Unmarshal(jsn, &rules)
rules = rules.Sort()
c.Assert(err, Equals, nil, Commentf("Could not unmarshal restored rules from json"))
c.Assert(rules, checker.DeepEquals, expected1)
// Marshal again & compare
// Marshal restored rules to JSON
jsn2, err := json.Marshal(rules)
c.Assert(err, Equals, nil, Commentf("Could not marshal restored rules to json"))
c.Assert(string(jsn2), Equals, pretty.String())
ep1.DNSRules = rules
s.proxy.RestoreRules(ep1)
_, exists = s.proxy.restored[epID1]
c.Assert(exists, Equals, true)
// After restoration of JSON marshaled/unmarshaled rules
// Case 1 | EPID1 | dstIP1 | 53 | www.ubuntu.com | Allowed due to restored rules
allowed, err = s.proxy.CheckAllowed(epID1, 53, 2, dstIP1, "www.ubuntu.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
// Case 2 | EPID1 | dstIP1 | 54 | cilium.io | Rejected due to restored rules | Port 54 only allows example.com
allowed, err = s.proxy.CheckAllowed(epID1, 54, 2, dstIP1, "cilium.io")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 3 | EPID1 | dstIP2a | 53 | cilium.io | Allowed due to restored rules
allowed, err = s.proxy.CheckAllowed(epID1, 53, 2, dstIP2a, "cilium.io")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
// Case 4 | EPID1 | dstIP2b | 53 | aws.amazon.com | Rejected due to restored rules | Only cilium.io is allowed with DstID2
allowed, err = s.proxy.CheckAllowed(epID1, 53, 2, dstIP2b, "aws.amazon.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// Case 5 | EPID1 | dstIP1 | 54 | example.com | Allowed due to restored rules
allowed, err = s.proxy.CheckAllowed(epID1, 54, 2, dstIP1, "example.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
// make sure random IP is not allowed
// Case 5 | EPID1 | random IP | 53 | example.com | Rejected due to restored rules
allowed, err = s.proxy.CheckAllowed(epID1, 53, 2, dstIPrandom, "example.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, false, Commentf("request was allowed when it should be rejected"))
// make sure random IP is allowed on a wildcard
// Case 5 | EPID1 | random IP | 54 | example.com | Allowed due to restored rules
allowed, err = s.proxy.CheckAllowed(epID1, 54, 2, dstIPrandom, "example.com")
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
s.proxy.RemoveRestoredRules(uint16(epID1))
_, exists = s.proxy.restored[epID1]
c.Assert(exists, Equals, false)
}
func (s *DNSProxyTestSuite) TestRestoredEndpoint(c *C) {
// Respond with an actual answer for the query. This also tests that the
// connection was forwarded via the correct protocol (tcp/udp) because we
	// connect with TCP, and the server only listens on TCP.
name := "cilium.io."
l7map := policy.L7DataMap{
cachedDstID1Selector: &policy.PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: []api.PortRuleDNS{{MatchName: name}},
},
},
}
query := name
err := s.proxy.UpdateAllowed(epID1, dstPort, l7map)
c.Assert(err, Equals, nil, Commentf("Could not update with rules"))
allowed, err := s.proxy.CheckAllowed(epID1, dstPort, dstID1, nil, query)
c.Assert(err, Equals, nil, Commentf("Error when checking allowed"))
c.Assert(allowed, Equals, true, Commentf("request was rejected when it should be allowed"))
// 1st request
request := new(dns.Msg)
request.SetQuestion(query, dns.TypeA)
response, rtt, err := s.dnsTCPClient.Exchange(request, s.proxy.TCPServer.Listener.Addr().String())
c.Assert(err, IsNil, Commentf("DNS request from test client failed when it should succeed (RTT: %v)", rtt))
c.Assert(len(response.Answer), Equals, 1, Commentf("Proxy returned incorrect number of answer RRs %s", response))
c.Assert(response.Answer[0].String(), Equals, "cilium.io.\t60\tIN\tA\t1.1.1.1", Commentf("Proxy returned incorrect RRs"))
// Get restored rules
restored, _ := s.proxy.GetRules(uint16(epID1))
restored.Sort()
// remove rules
err = s.proxy.UpdateAllowed(epID1, dstPort, nil)
c.Assert(err, Equals, nil, Commentf("Could not remove rules"))
// 2nd request, refused due to no rules
request = new(dns.Msg)
request.SetQuestion(query, dns.TypeA)
response, rtt, err = s.dnsTCPClient.Exchange(request, s.proxy.TCPServer.Listener.Addr().String())
c.Assert(err, IsNil, Commentf("DNS request from test client failed when it should succeed (RTT: %v)", rtt))
c.Assert(len(response.Answer), Equals, 0, Commentf("Proxy returned incorrect number of answer RRs %s", response))
c.Assert(response.Rcode, Equals, dns.RcodeRefused, Commentf("DNS request from test client was not rejected when it should be blocked"))
// restore rules, set the mock to restoring state
s.restoring = true
ep1 := endpoint.NewEndpointWithState(s, s, &endpoint.FakeEndpointProxy{}, testidentity.NewMockIdentityAllocator(nil), uint16(epID1), endpoint.StateReady)
ep1.IPv4, _ = addressing.NewCiliumIPv4("127.0.0.1")
ep1.IPv6, _ = addressing.NewCiliumIPv6("::1")
ep1.DNSRules = restored
s.proxy.RestoreRules(ep1)
_, exists := s.proxy.restored[epID1]
c.Assert(exists, Equals, true)
	// 3rd request, answered due to restored Endpoint and rules being found
request = new(dns.Msg)
request.SetQuestion(query, dns.TypeA)
response, rtt, err = s.dnsTCPClient.Exchange(request, s.proxy.TCPServer.Listener.Addr().String())
c.Assert(err, IsNil, Commentf("DNS request from test client failed when it should succeed (RTT: %v)", rtt))
c.Assert(len(response.Answer), Equals, 1, Commentf("Proxy returned incorrect number of answer RRs %s", response))
c.Assert(response.Answer[0].String(), Equals, "cilium.io.\t60\tIN\tA\t1.1.1.1", Commentf("Proxy returned incorrect RRs"))
// cleanup
s.proxy.RemoveRestoredRules(uint16(epID1))
_, exists = s.proxy.restored[epID1]
c.Assert(exists, Equals, false)
s.restoring = false
}
type selectorMock struct {
}
func (t selectorMock) GetSelections() []identity.NumericIdentity {
panic("implement me")
}
func (t selectorMock) Selects(nid identity.NumericIdentity) bool {
panic("implement me")
}
func (t selectorMock) IsWildcard() bool {
panic("implement me")
}
func (t selectorMock) IsNone() bool {
panic("implement me")
}
func (t selectorMock) String() string {
panic("implement me")
}
func Benchmark_perEPAllow_setPortRulesForID(b *testing.B) {
const (
nMatchPatterns = 100
)
var selectorA, selectorB *selectorMock
newRules := policy.L7DataMap{
selectorA: nil,
selectorB: nil,
}
var portRuleDNS []api.PortRuleDNS
for i := 0; i < nMatchPatterns; i++ {
portRuleDNS = append(portRuleDNS, api.PortRuleDNS{
MatchPattern: "kubernetes.default.svc.cluster.local",
})
}
for selector := range newRules {
newRules[selector] = &policy.PerSelectorPolicy{
L7Rules: api.L7Rules{
DNS: portRuleDNS,
},
}
}
pea := perEPAllow{}
lru := lru.New(128)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
for epID := uint64(0); epID < 20; epID++ {
pea.setPortRulesForID(lru, epID, 8053, newRules)
}
}
}<|fim▁end|>
|
// Case 8 | EPID3 | DstID1 | 54 | example.com | Rejected | EPID3 is only allowed to ask DstID1 on Port 53 for example.com
allowed, err = s.proxy.CheckAllowed(epID3, 54, dstID1, nil, "example.com")
|
<|file_name|>base.py<|end_file_name|><|fim▁begin|>"""Base settings shared by all environments.
This is a reusable basic settings file.
"""
from django.conf.global_settings import *
import os
import sys
import re
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'GB'
USE_TZ = True
USE_I18N = True
USE_L10N = True
LANGUAGE_CODE = 'en-GB'
LANGUAGES = (
('en-GB', 'British English'),
)
SITE_ID = 1
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/'
STATIC_URL = '/static/'
MEDIA_URL = '/uploads/'
ADMINS = (
('David Seddon', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
}
}
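# Connection details (NAME, USER, PASSWORD, ...) are intentionally omitted
# here; each environment-specific settings module is expected to supply them.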
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S"
},
},
'handlers': {
'error': {
'level':'ERROR',
'class':'logging.handlers.RotatingFileHandler',
# 'filename': ERROR_LOG_PATH, - filled in by handler
'maxBytes': 50000,
'backupCount': 2,
'formatter': 'standard',
},
'debug': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
# 'filename': DEBUG_LOG_PATH, - filled in by handler
'maxBytes': 50000,
'backupCount': 2,
'formatter': 'standard',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
},
},
'loggers': {
'django': {
'handlers':['error'],
'propagate': True,
'level':'DEBUG',
},
'django.request': {
'handlers': ['mail_admins', 'error'],
'level': 'ERROR',
'propagate': False,
},
'project': {
'handlers':['debug'],
'propagate': True,
'level':'DEBUG',
},
}
}
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
)
ROOT_URLCONF = 'urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',<|fim▁hole|> 'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)<|fim▁end|>
|
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
|
<|file_name|>policy_set_result.py<|end_file_name|><|fim▁begin|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PolicySetResult(Model):
"""Result of a policy set evaluation.
:param has_error: A value indicating whether this policy set evaluation
has discovered violations.
:type has_error: bool
:param policy_violations: The list of policy violations.
:type policy_violations:
list[~azure.mgmt.devtestlabs.models.PolicyViolation]
"""
_attribute_map = {
'has_error': {'key': 'hasError', 'type': 'bool'},
'policy_violations': {'key': 'policyViolations', 'type': '[PolicyViolation]'},
}
def __init__(self, has_error=None, policy_violations=None):
super(PolicySetResult, self).__init__()
self.has_error = has_error<|fim▁hole|><|fim▁end|>
|
self.policy_violations = policy_violations
|
<|file_name|>AuthRestHandler.java<|end_file_name|><|fim▁begin|>package jp.hashiwa.elasticsearch.authplugin;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.rest.*;
import java.util.*;
import java.util.regex.Pattern;
public class AuthRestHandler implements RestHandler {
private final Logger logger = Loggers.getLogger(AuthRestHandler.class);
private final RestHandler originalHandler;
private final RestResponse unauthorizedResponse = new RestResponse() {
@Override
public String contentType() {
return "application/json";
}
@Override
public BytesReference content() {
return new BytesArray("");
}
@Override
public RestStatus status() {
return RestStatus.UNAUTHORIZED;
}
};
    // Hold List<Pattern> rather than Stream<Pattern>: a Stream can only be
    // consumed once, so reusing a cached stream on a later request would
    // throw an IllegalStateException.
    private final Map<RestRequest.Method, List<Pattern>> authPatterns = new HashMap<RestRequest.Method, List<Pattern>>() {
        {
            this.put(RestRequest.Method.POST, Arrays.asList(
                Pattern.compile("^/testindex(/.*)?$")
            ));
            this.put(RestRequest.Method.PUT, Arrays.asList(
                Pattern.compile("^/testindex(/.*)?$")
            ));
            // all methods
            this.put(null, Arrays.asList(
                Pattern.compile("^/adminindex(/.*)?$")
            ));
        }
    };
AuthRestHandler(RestHandler restHandler) {
this.originalHandler = restHandler;
}
@Override
public void handleRequest(RestRequest restRequest, RestChannel restChannel, NodeClient nodeClient) throws Exception {
this.logger.debug(restRequest.path());
this.logger.debug(restRequest.rawPath());
if (isOk(restRequest)) {
this.originalHandler.handleRequest(restRequest, restChannel, nodeClient);
} else {
restChannel.sendResponse(unauthorizedResponse);
}
}
private boolean needAuth(RestRequest.Method method, String path) {
if (authPatterns.containsKey(method)) {
            List<Pattern> patterns = authPatterns.get(method);
            boolean match = patterns.stream().anyMatch(
                p -> p.matcher(path).matches()
            );
            return match;
}
return false;<|fim▁hole|>
private boolean isOk(RestRequest restRequest) {
RestRequest.Method method = restRequest.method();
String path = restRequest.path(); // use rawpath() ?
boolean needAuth = needAuth(method, path)
|| needAuth(null, path);
if (! needAuth) {
return true;
}
for (java.util.Map.Entry<String, String> entry: restRequest.headers()) {
String key = entry.getKey();
String value = entry.getValue();
if (key.equals("user") && value.equals("admin")) {
return true;
}
}
return false;
// ES 5.4
// return restRequest.getHeaders().get("user").equals("admin");
}
}<|fim▁end|>
|
}
|
<|file_name|>bitcoin_nl.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="nl" version="2.1">
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About ChiliCoin</source>
<translation>Over ChiliCoin</translation>
</message>
<message>
<location line="+39"/>
<source><b>ChiliCoin</b> version</source>
<translation><b>ChiliCoin</b> versie</translation>
</message>
<message>
<location line="+41"/>
<source>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2015 The ChiliCoin developers</source>
<translation>Copyright © 2009-2014 De Bitcoin ontwikkelaars
Copyright © 2012-2014 De NovaCoin ontwikkelaars
Copyright © 2015 De ChiliCoin ontwikkelaars</translation>
</message>
<message>
<location line="+15"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>
Dit is experimentele software.
Gedistribueerd onder de MIT/X11 software licentie, zie het bijgevoegde bestand COPYING of http://www.opensource.org/licenses/mit-license.php.
Dit product bevat software ontwikkeld door het OpenSSL Project voor gebruik in de OpenSSL Toolkit (http://www.openssl.org/) en cryptografische software gemaakt door Eric Young ([email protected]) en UPnP software geschreven door Thomas Bernard.</translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>Adresboek</translation>
</message>
<message>
<location line="+22"/>
<source>Double-click to edit address or label</source>
<translation>Dubbelklik om het adres of label te wijzigen</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Maak een nieuw adres aan</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Kopieer het huidig geselecteerde adres naar het klembord</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>&Nieuw adres</translation>
</message>
<message>
<location line="-46"/>
<source>These are your ChiliCoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>Dit zijn al je ChiliCoin adressen om betalingen mee te ontvangen. Je kunt iedere verzender een apart adres geven zodat je kunt volgen wie jou betaalt.</translation>
</message>
<message>
<location line="+60"/>
<source>&Copy Address</source>
<translation>&Kopiëer Adres</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>Toon &QR Code</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a ChiliCoin address</source>
<translation>Teken een bericht om te bewijzen dat je een ChiliCoin adres bezit.</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Teken &Bericht</translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>Verwijder het geselecteerde adres van de lijst</translation>
</message>
<message>
<location line="-14"/>
<source>Verify a message to ensure it was signed with a specified ChiliCoin address</source>
<translation>Verifieer een bericht om zeker te zijn dat deze is ondertekend met een specifiek ChiliCoin adres</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation>&Verifieer Bericht</translation>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Verwijder</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+65"/>
<source>Copy &Label</source>
<translation>Kopiëer &Label</translation>
</message>
<message>
<location line="+2"/>
<source>&Edit</source>
<translation>&Bewerk</translation>
</message>
<message>
<location line="+250"/>
<source>Export Address Book Data</source>
<translation>Exporteer Adresboek Data</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Kommagescheiden bestand (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>Fout bij exporteren</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Kan niet schrijven naar bestand %1.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Label</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adres</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(geen label)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>Wachtwoordscherm</translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Voer wachtwoord in</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Nieuw wachtwoord</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Herhaal wachtwoord</translation>
</message>
<message>
<location line="+33"/>
<source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source>
<translation>Dient om het triviale 'sendmoney' commando uit te schakelen wanneer het OS-account gecompromitteerd is. Biedt geen echte beveiliging.</translation>
</message>
<message>
<location line="+3"/>
<source>For staking only</source>
<translation>Alleen voor staking</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+35"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Vul een nieuw wachtwoord in voor uw portemonnee.<br/>Gebruik een wachtwoord van <b>10 of meer willekeurige karakters</b>, of <b>acht of meer woorden</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Versleutel portemonnee</translation>
</message>
<message>
<location line="+7"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Deze operatie vereist uw portemonneewachtwoord om de portemonnee te openen.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Open portemonnee</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Deze operatie vereist uw portemonneewachtwoord om de portemonnee te ontsleutelen</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Ontsleutel portemonnee</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Wijzig wachtwoord</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Vul uw oude en nieuwe portemonneewachtwoord in.</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Bevestig versleuteling van de portemonnee</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR COINS</b>!</source>
<translation>Waarschuwing: Als je je portemonnee versleuteld en je verliest je wachtwoord zul je <b>AL JE MUNTEN VERLIEZEN</b>!</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>Weet u zeker dat u uw portemonnee wilt versleutelen?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>BELANGRIJK: Elke eerder gemaakte backup van uw portemonneebestand dient u te vervangen door het nieuw gegenereerde, versleutelde portemonneebestand. Om veiligheidsredenen zullen eerdere backups van het niet-versleutelde portemonneebestand onbruikbaar worden zodra u uw nieuwe, versleutelde, portemonnee begint te gebruiken.</translation>
</message>
<message>
<location line="+103"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Waarschuwing: De Caps-Lock-toets staat aan!</translation>
</message>
<message>
<location line="-133"/>
<location line="+60"/>
<source>Wallet encrypted</source>
<translation>Portemonnee versleuteld</translation>
</message>
<message>
<location line="-58"/>
<source>ChiliCoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source>
<translation>ChiliCoin zal nu sluiten om het versleutelproces te voltooien. Onthoud dat het versleutelen van je portemonnee je niet volledig beschermt tegen diefstal van munten door malware op je computer.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+44"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Portemonneeversleuteling mislukt</translation>
</message>
<message>
<location line="-56"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Portemonneeversleuteling mislukt door een interne fout. Uw portemonnee is niet versleuteld.</translation>
</message>
<message>
<location line="+7"/>
<location line="+50"/>
<source>The supplied passphrases do not match.</source>
<translation>De opgegeven wachtwoorden komen niet overeen</translation>
</message>
<message>
<location line="-38"/>
<source>Wallet unlock failed</source>
<translation>Portemonnee openen mislukt</translation>
</message>
<message>
<location line="+1"/>
<location line="+12"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>Het opgegeven wachtwoord voor de portemonnee-ontsleuteling is niet correct.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Portemonnee-ontsleuteling mislukt</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>Portemonneewachtwoord is met succes gewijzigd.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+282"/>
<source>Sign &message...</source>
<translation>&Onderteken bericht...</translation>
</message>
<message>
<location line="+251"/>
<source>Synchronizing with network...</source>
<translation>Synchroniseren met netwerk...</translation>
</message>
<message>
<location line="-319"/>
<source>&Overview</source>
<translation>&Overzicht</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Toon algemeen overzicht van de portemonnee</translation>
</message>
<message>
<location line="+17"/>
<source>&Transactions</source>
<translation>&Transacties</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Blader door transactieverleden</translation>
</message>
<message>
<location line="+5"/>
<source>&Address Book</source>
<translation>&Adresboek</translation>
</message>
<message>
<location line="+1"/>
<source>Edit the list of stored addresses and labels</source>
<translation>Wijzig de lijst met bewaarde adressen en labels</translation>
</message>
<message>
<location line="-13"/>
<source>&Receive coins</source>
<translation>&Ontvang munten</translation>
</message>
<message>
<location line="+1"/>
<source>Show the list of addresses for receiving payments</source>
<translation>Toon de lijst aan adressen voor ontvangen betalingen</translation>
</message>
<message>
<location line="-7"/>
<source>&Send coins</source>
<translation>&amp;Verstuur munten</translation>
</message>
<message>
<location line="+35"/>
<source>E&xit</source>
<translation>&Afsluiten</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Programma afsluiten</translation>
</message>
<message>
<location line="+6"/>
<source>Show information about ChiliCoin</source>
<translation>Toon informatie over ChiliCoin</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>Over &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Toon informatie over Qt</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Opties...</translation>
</message>
<message>
<location line="+4"/>
<source>&Encrypt Wallet...</source>
<translation>&Versleutel Portemonnee...</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>&Backup Portemonnee...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&Wijzig Wachtwoord</translation>
</message>
<message numerus="yes">
<location line="+259"/>
<source>~%n block(s) remaining</source>
<translation><numerusform>nog ~%n blok</numerusform><numerusform>nog ~%n blokken</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source>
<translation>%1 van %2 blokken transactiegeschiedenis gedownload (%3% voltooid).</translation>
</message>
<message>
<location line="-256"/>
<source>&Export...</source>
<translation>&Exporteren...</translation>
</message>
<message>
<location line="-64"/>
<source>Send coins to a ChiliCoin address</source>
<translation>Verstuur munten naar een ChiliCoin adres</translation>
</message>
<message>
<location line="+47"/>
<source>Modify configuration options for ChiliCoin</source>
<translation>Verander configuratie opties voor ChiliCoin</translation>
</message>
<message>
<location line="+18"/>
<source>Export the data in the current tab to a file</source>
<translation>Exporteer de data in de huidige tab naar een bestand</translation>
</message>
<message>
<location line="-14"/>
<source>Encrypt or decrypt wallet</source>
<translation>Versleutel of ontsleutel de portemonnee</translation>
</message>
<message>
<location line="+3"/>
<source>Backup wallet to another location</source>
<translation>Backup portemonnee naar een andere locatie</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Wijzig het wachtwoord voor uw portemonneeversleuteling</translation>
</message>
<message>
<location line="+10"/>
<source>&Debug window</source>
<translation>&Debugscherm</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>Open debugging en diagnostische console</translation>
</message>
<message>
<location line="-5"/>
<source>&Verify message...</source>
<translation>&Verifiëer bericht...</translation>
</message>
<message>
<location line="-202"/>
<source>ChiliCoin</source>
<translation>ChiliCoin</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet</source>
<translation>Portemonnee</translation>
</message>
<message>
<location line="+180"/>
<source>&About ChiliCoin</source>
<translation>&Over ChiliCoin</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&Toon / Verberg</translation>
</message>
<message>
<location line="+9"/>
<source>Unlock wallet</source>
<translation>Open portemonnee</translation>
</message>
<message>
<location line="+1"/>
<source>&Lock Wallet</source>
<translation>&Sluit portemonnee</translation>
</message>
<message>
<location line="+1"/>
<source>Lock wallet</source>
<translation>Sluit portemonnee</translation>
</message>
<message>
<location line="+35"/>
<source>&File</source>
<translation>&Bestand</translation>
</message>
<message>
<location line="+8"/>
<source>&Settings</source>
<translation>&Instellingen</translation>
</message>
<message>
<location line="+8"/>
<source>&Help</source>
<translation>&Hulp</translation>
</message>
<message>
<location line="+12"/>
<source>Tabs toolbar</source>
<translation>Tab-werkbalk</translation>
</message>
<message>
<location line="+8"/>
<source>Actions toolbar</source>
<translation>Acties werkbalk</translation>
</message>
<message>
<location line="+13"/>
<location line="+9"/>
<source>[testnet]</source>
<translation>[testnetwerk]</translation>
</message>
<message>
<location line="+0"/>
<location line="+60"/>
<source>ChiliCoin client</source>
<translation>ChiliCoin client</translation>
</message>
<message numerus="yes">
<location line="+75"/>
<source>%n active connection(s) to ChiliCoin network</source>
<translation><numerusform>%n actieve verbinding naar ChiliCoin netwerk</numerusform><numerusform>%n actieve verbindingen naar ChiliCoin netwerk</numerusform></translation>
</message>
<message>
<location line="+40"/>
<source>Downloaded %1 blocks of transaction history.</source>
<translation>%1 blokken transactiegeschiedenis gedownload.</translation>
</message>
<message>
<location line="+413"/>
<source>Staking.<br>Your weight is %1<br>Network weight is %2<br>Expected time to earn reward is %3</source>
<translation>Staking.<br>Uw gewicht is %1<br>Netwerkgewicht is %2<br>Verwachte tijd om beloning te verdienen is %3</translation>
</message>
<message>
<location line="+6"/>
<source>Not staking because wallet is locked</source>
<translation>Niet aan het staken omdat de portemonnee vergrendeld is</translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is offline</source>
<translation>Niet aan het staken omdat portemonnee offline is</translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is syncing</source>
<translation>Niet aan het staken omdat je portemonnee aan het synchroniseren is.</translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because you don't have mature coins</source>
<translation>Niet aan het staken omdat je geen rijpe munten hebt</translation>
</message>
<message numerus="yes">
<location line="-403"/>
<source>%n second(s) ago</source>
<translation><numerusform>%n seconde geleden</numerusform><numerusform>%n seconden geleden</numerusform></translation>
</message>
<message>
<location line="-312"/>
<source>About ChiliCoin card</source>
<translation>Over ChiliCoin card</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about ChiliCoin card</source>
<translation>Toon informatie over ChiliCoin card</translation>
</message>
<message>
<location line="+18"/>
<source>&Unlock Wallet...</source>
<translation>&amp;Ontgrendel portemonnee...</translation>
</message>
<message numerus="yes">
<location line="+297"/>
<source>%n minute(s) ago</source>
<translation><numerusform>%n minuut geleden</numerusform><numerusform>%n minuten geleden</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s) ago</source>
<translation><numerusform>%n uur geleden</numerusform><numerusform>%n uren geleden</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s) ago</source>
<translation><numerusform>%n dag geleden</numerusform><numerusform>%n dagen geleden</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Up to date</source>
<translation>Bijgewerkt</translation>
</message>
<message>
<location line="+7"/>
<source>Catching up...</source>
<translation>Aan het bijwerken...</translation>
</message>
<message>
<location line="+10"/>
<source>Last received block was generated %1.</source>
<translation>Laatst ontvangen blok is %1 gegenereerd.</translation>
</message>
<message>
<location line="+59"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation>De grootte van deze transactie overschrijdt de limiet. U kan deze transactie laten uitvoeren voor een meerkost van %1, dewelke doorgestuurd wordt naar de nodes die uw transactie verwerken, alsook ter ondersteuning van het netwerk. Wil u deze meerkost betalen?</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm transaction fee</source>
<translation>Bevestig transactie kosten</translation>
</message>
<message>
<location line="+27"/>
<source>Sent transaction</source>
<translation>Verzonden transactie</translation>
</message>
<message>
<location line="+1"/>
<source>Incoming transaction</source>
<translation>Binnenkomende transactie</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Datum: %1
Bedrag: %2
Type: %3
Adres: %4
</translation>
</message>
<message>
<location line="+100"/>
<location line="+15"/>
<source>URI handling</source>
<translation>URI-behandeling</translation>
</message>
<message>
<location line="-15"/>
<location line="+15"/>
<source>URI can not be parsed! This can be caused by an invalid ChiliCoin address or malformed URI parameters.</source>
<translation>URI kan niet ontleedt worden! Mogelijke oorzaken zijn een ongeldig ChiliCoin adres of incorrecte URI parameters.</translation>
</message>
<message>
<location line="+18"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Portemonnee is <b>versleuteld</b> en momenteel <b>geopend</b></translation>
</message>
<message>
<location line="+10"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Portemonnee is <b>versleuteld</b> en momenteel <b>gesloten</b></translation>
</message>
<message>
<location line="+25"/>
<source>Backup Wallet</source>
<translation>Backup Portemonnee</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation>Portemonnee bestanden (*.dat)</translation>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation>Backup mislukt</translation>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation>Er is een fout opgetreden bij het opslaan van de portemonneedata op de nieuwe locatie.</translation>
</message>
<message numerus="yes">
<location line="+76"/>
<source>%n second(s)</source>
<translation><numerusform>%n seconde</numerusform><numerusform>%n seconden</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n minute(s)</source>
<translation><numerusform>%n minuut</numerusform><numerusform>%n minuten</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n hour(s)</source>
<translation><numerusform>%n uur</numerusform><numerusform>%n uur</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation><numerusform>%n dag</numerusform><numerusform>%n dagen</numerusform></translation>
</message>
<message>
<location line="+18"/>
<source>Not staking</source>
<translation>Niet aan het staken.</translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+109"/>
<source>A fatal error occurred. ChiliCoin can no longer continue safely and will quit.</source>
<translation>Er is een fatale fout opgetreden. ChiliCoin kan niet langer veilig doorgaan en zal afsluiten.</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+90"/>
<source>Network Alert</source>
<translation>Netwerkwaarschuwing</translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<location filename="../forms/coincontroldialog.ui" line="+14"/>
<source>Coin Control</source>
<translation>Coin controle opties</translation>
</message>
<message>
<location line="+31"/>
<source>Quantity:</source>
<translation>Kwantiteit:</translation>
</message>
<message>
<location line="+32"/>
<source>Bytes:</source>
<translation>Bytes:</translation>
</message>
<message>
<location line="+48"/>
<source>Amount:</source>
<translation>Bedrag:</translation>
</message>
<message>
<location line="+32"/>
<source>Priority:</source>
<translation>Prioriteit:</translation>
</message>
<message>
<location line="+48"/>
<source>Fee:</source>
<translation>Vergoeding:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation>Lage uitvoer:</translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="+551"/>
<source>no</source>
<translation>nee</translation>
</message>
<message>
<location filename="../forms/coincontroldialog.ui" line="+51"/>
<source>After Fee:</source>
<translation>Na vergoeding:</translation>
</message>
<message>
<location line="+35"/>
<source>Change:</source>
<translation>Wisselgeld:</translation>
</message>
<message>
<location line="+69"/>
<source>(un)select all</source>
<translation>(de)selecteer alles</translation>
</message>
<message>
<location line="+13"/>
<source>Tree mode</source>
<translation>Boom modus</translation>
</message>
<message>
<location line="+16"/>
<source>List mode</source>
<translation>Lijst modus</translation>
</message>
<message>
<location line="+45"/>
<source>Amount</source>
<translation>Bedrag</translation>
</message>
<message>
<location line="+5"/>
<source>Label</source>
<translation>Label</translation>
</message>
<message>
<location line="+5"/>
<source>Address</source>
<translation>Adres</translation>
</message>
<message>
<location line="+5"/>
<source>Date</source>
<translation>Datum</translation>
</message>
<message>
<location line="+5"/>
<source>Confirmations</source>
<translation>Bevestigingen</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed</source>
<translation>Bevestigd</translation>
</message>
<message>
<location line="+5"/>
<source>Priority</source>
<translation>Prioriteit</translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="-515"/>
<source>Copy address</source>
<translation>Kopieer adres</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Kopieer label</translation>
</message>
<message>
<location line="+1"/>
<location line="+26"/>
<source>Copy amount</source>
<translation>Kopieer bedrag</translation>
</message>
<message>
<location line="-25"/>
<source>Copy transaction ID</source>
<translation>Kopieer transactie-ID</translation>
</message>
<message>
<location line="+24"/>
<source>Copy quantity</source>
<translation>Kopieer aantal</translation>
</message>
<message>
<location line="+2"/>
<source>Copy fee</source>
<translation>Kopieer vergoeding</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation>Kopieer na vergoeding</translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation>Kopieer bytes</translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>Kopieer prioriteit</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation>Kopieer lage uitvoer</translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation>Kopieer wisselgeld</translation>
</message>
<message>
<location line="+317"/>
<source>highest</source>
<translation>hoogste</translation>
</message>
<message>
<location line="+1"/>
<source>high</source>
<translation>hoog</translation>
</message>
<message>
<location line="+1"/>
<source>medium-high</source>
<translation>gemiddeld hoog</translation>
</message>
<message>
<location line="+1"/>
<source>medium</source>
<translation>gemiddeld</translation>
</message>
<message>
<location line="+4"/>
<source>low-medium</source>
<translation>laag gemiddeld</translation>
</message>
<message>
<location line="+1"/>
<source>low</source>
<translation>laag</translation>
</message>
<message>
<location line="+1"/>
<source>lowest</source>
<translation>laagste</translation>
</message>
<message>
<location line="+155"/>
<source>DUST</source>
<translation>STOF</translation>
</message>
<message>
<location line="+0"/>
<source>yes</source>
<translation>ja</translation>
</message>
<message>
<location line="+10"/>
<source>This label turns red, if the transaction size is bigger than 10000 bytes.
This means a fee of at least %1 per kb is required.
Can vary +/- 1 Byte per input.</source>
<translation>Dit label wordt rood als de transactie groter is dan 10000 bytes.
Dit betekent dat een vergoeding van minimaal %1 per kb noodzakelijk is.
Kan +/- 1 byte per invoer variëren.</translation>
</message>
<message>
<location line="+1"/>
<source>Transactions with higher priority get more likely into a block.
This label turns red, if the priority is smaller than "medium".
This means a fee of at least %1 per kb is required.</source>
<translation>Transacties met een hogere prioriteit komen sneller in een blok.
Dit label wordt rood als de prioriteit lager is dan "gemiddeld".
Dit betekent dat een vergoeding van minimaal %1 per kb noodzakelijk is.</translation>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if any recipient receives an amount smaller than %1.
This means a fee of at least %2 is required.
Amounts below 0.546 times the minimum relay fee are shown as DUST.</source>
<translation>Dit label wordt rood als een ontvanger een bedrag ontvangt dat kleiner is dan %1.
Dit betekent dat een vergoeding van ten minste %2 vereist is.
Bedragen onder 0.546 keer de minimale relay vergoeding worden weergegeven als DUST.</translation>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if the change is smaller than %1.
This means a fee of at least %2 is required.</source>
<translation>Dit label wordt rood als het wisselgeld kleiner is dan %1.
Dit betekent dat een vergoeding van ten minste %2 vereist is.</translation>
</message>
<message>
<location line="+37"/>
<location line="+66"/>
<source>(no label)</source>
<translation>(geen label)</translation>
</message>
<message>
<location line="-9"/>
<source>change from %1 (%2)</source>
<translation>wisselgeld van %1 (%2)</translation>
</message>
<message>
<location line="+1"/>
<source>(change)</source>
<translation>(wisselgeld)</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Bewerk Adres</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Label</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>Het label geassocieerd met deze notitie in het adresboek</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Adres</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>Het adres geassocieerd met deze notitie in het adresboek. Dit kan enkel aangepast worden bij verzend-adressen.</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+20"/>
<source>New receiving address</source>
<translation>Nieuw ontvangstadres</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Nieuw adres om naar te verzenden</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Bewerk ontvangstadres</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Bewerk adres om naar te verzenden</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>Het opgegeven adres "%1" bestaat al in uw adresboek.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid ChiliCoin address.</source>
<translation>Het ingevoerde adres "%1" is geen geldig ChiliCoin adres.</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Kon de portemonnee niet openen.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Genereren nieuwe sleutel mislukt.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+420"/>
<location line="+12"/>
<source>chilicoin-qt</source>
<translation>chilicoin-qt</translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>versie</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>Gebruik:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation>Commandoregel-opties</translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation>Gebruikerinterface-opties</translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>Stel taal in, bijvoorbeeld "de_DE" (standaard: systeeminstellingen)</translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation>Geminimaliseerd starten</translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation>Laat laadscherm zien bij het opstarten (standaard: 1)</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Opties</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>&Algemeen</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source>
<translation>Optionele transactiekosten per kB die er mede voor zorgen dat uw transacties snel worden verwerkt. De meeste transacties zijn 1 kB. Een vergoeding van 0.01 wordt aanbevolen.</translation>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Betaal &transactiekosten</translation>
</message>
<message>
<location line="+31"/>
<source>Reserved amount does not participate in staking and is therefore spendable at any time.</source>
<translation>Gereserveerde hoeveelheid doet niet mee in staking en is daarom altijd uitgeefbaar.</translation>
</message>
<message>
<location line="+15"/>
<source>Reserve</source>
<translation>Gereserveerd</translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start ChiliCoin after logging in to the system.</source>
<translation>Start ChiliCoin automatisch na het aanmelden bij het systeem.</translation>
</message>
<message>
<location line="+3"/>
<source>&Start ChiliCoin on system login</source>
<translation>&Start ChiliCoin bij systeem aanmelding</translation>
</message>
<message>
<location line="+7"/>
<source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. The wallet is always detached.</source>
<translation>Koppel blok- en adresdatabases los bij het afsluiten. Dit betekent dat ze naar een andere datamap verplaatst kunnen worden, maar het vertraagt het afsluiten. De portemonnee wordt altijd losgekoppeld.</translation>
</message>
<message>
<location line="+3"/>
<source>&Detach databases at shutdown</source>
<translation>&Koppel database los bij afsluiten</translation>
</message>
<message>
<location line="+21"/>
<source>&Network</source>
<translation>&Netwerk</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the ChiliCoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>De ChiliCoin client poort automatisch openen op de router. Dit werkt alleen wanneer uw router UPnP ondersteunt en deze is ingeschakeld.</translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Portmapping via &UPnP</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the ChiliCoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation>Verbinding maken met het ChiliCoin netwerk via een SOCKS proxy (bijvoorbeeld als U gebruik maakt van Tor)</translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation>&Verbind door SOCKS proxy:</translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>Proxy &IP:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation>IP Adres van de proxy (bijv. 127.0.0.1)</translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&Poort:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Poort van de proxy (bijv. 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>SOCKS-&Versie:</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>SOCKS-versie van de proxy (bijv. 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>&Scherm</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Laat alleen een systeemvak-icoon zien wanneer het venster geminimaliseerd is</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&Minimaliseer naar het systeemvak in plaats van de taakbalk</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Minimaliseer het venster in plaats van de applicatie af te sluiten wanneer het venster gesloten wordt. Als deze optie aan staat, kan de applicatie alleen worden afgesloten door Afsluiten te kiezen in het menu.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>Minimaliseer bij sluiten van het &venster</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&Interface</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>Taal &Gebruikersinterface:</translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting ChiliCoin.</source>
<translation>De taal van de gebruikersinterface kan hier ingesteld worden. Deze instelling wordt toegepast na het herstarten van ChiliCoin.</translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>&Eenheid om bedrag in te tonen:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Kies de standaard onderverdelingseenheid om weer te geven in uw programma, en voor het versturen van munten</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show ChiliCoin addresses in the transaction list or not.</source>
<translation>ChiliCoin adressen in de transactielijst weergeven of niet</translation>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>Toon a&dressen in de transactielijst</translation>
</message>
<message>
<location line="+7"/>
<source>Whether to show coin control features or not.</source>
<translation>Munt controle functies weergeven of niet.</translation>
</message>
<message>
<location line="+3"/>
<source>Display coin &control features (experts only!)</source>
<translation>Laat coin &control functies zien (enkel voor gevorderden!)</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&OK</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>Ann&uleren</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>&Toepassen</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+55"/>
<source>default</source>
<translation>standaard</translation>
</message>
<message>
<location line="+149"/>
<location line="+9"/>
<source>Warning</source>
<translation>Waarschuwing</translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting ChiliCoin.</source>
<translation>Deze instelling wordt toegepast na een herstart van ChiliCoin.</translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>Het opgegeven proxyadres is ongeldig.</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Formulier</translation>
</message>
<message>
<location line="+33"/>
<location line="+231"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the ChiliCoin network after a connection is established, but this process has not completed yet.</source>
<translation>De weergegeven informatie kan verouderd zijn. Je portemonnee synchroniseert automatisch met het ChiliCoin netwerk nadat er verbinding is gemaakt, maar dit proces is nog niet voltooid.</translation>
</message>
<message>
<location line="-160"/>
<source>Stake:</source>
<translation>Stake:</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>Onbevestigd:</translation>
</message>
<message>
<location line="-107"/>
<source>Wallet</source>
<translation>Portemonnee</translation>
</message>
<message>
<location line="+49"/>
<source>Spendable:</source>
<translation>Uitgeefbaar:</translation>
</message>
<message>
<location line="+16"/>
<source>Your current spendable balance</source>
<translation>Uw beschikbare saldo</translation>
</message>
<message>
<location line="+71"/>
<source>Immature:</source>
<translation>Immatuur:</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>Gedolven saldo dat nog niet tot wasdom is gekomen</translation>
</message>
<message>
<location line="+20"/>
<source>Total:</source>
<translation>Totaal:</translation>
</message>
<message>
<location line="+16"/>
<source>Your current total balance</source>
<translation>Uw totale saldo</translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Recente transacties</b></translation>
</message>
<message>
<location line="-108"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>Totaal van transacties die nog bevestigd moeten worden en nog niet meetellen voor het huidige saldo</translation>
</message>
<message>
<location line="-29"/>
<source>Total of coins that was staked, and do not yet count toward the current balance</source>
<translation>Totaal aantal munten dat gestaked is en nog niet meetelt voor het huidige saldo</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+113"/>
<location line="+1"/>
<source>out of sync</source>
<translation>niet gesynchroniseerd</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation>QR Code Scherm</translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>Vraag betaling</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>Hoeveelheid:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>Label:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>Bericht:</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&Opslaan als...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation>Fout tijdens encoderen URI in QR-code</translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation>De ingevoerde hoeveelheid is ongeldig, controleer a.u.b.</translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>Resulterende URI te lang, probeer de tekst korter te maken voor het label/bericht.</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation>Sla QR Code op.</translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>PNG Afbeeldingen (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>Clientnaam</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+348"/>
<source>N/A</source>
<translation>N.v.t.</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>Clientversie</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&Informatie</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Gebruikt OpenSSL versie</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>Opstarttijd</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Netwerk</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Aantal connecties</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation>Op testnetwerk</translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>Blokketen</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Huidig aantal blokken</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>Geschat totaal aantal blokken</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>Tijd laatste blok</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&Open</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation>Commandoregel-opties</translation>
</message>
<message>
<location line="+7"/>
<source>Show the chilicoin-qt help message to get a list with possible ChiliCoin command-line options.</source>
<translation>Laat het chilicoin-qt helpbericht zien voor een lijst met mogelijke ChiliCoin commandoregel-opties.</translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation>&Show</translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&Console</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>Bouwdatum</translation>
</message>
<message>
<location line="-104"/>
<source>ChiliCoin - Debug window</source>
<translation>ChiliCoin - Debugscherm</translation>
</message>
<message>
<location line="+25"/>
<source>ChiliCoin Core</source>
<translation>ChiliCoin Kern</translation>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>Debug-logbestand</translation>
</message>
<message>
<location line="+7"/>
<source>Open the ChiliCoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>Open het ChiliCoin debug log bestand van de huidige data map. Dit kan een paar seconden duren voor grote log bestanden.</translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Maak console leeg</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-33"/>
<source>Welcome to the ChiliCoin RPC console.</source>
<translation>Welkom bij de ChiliCoin RPC console.</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Gebruik de pijltjestoetsen om door de geschiedenis te navigeren, en <b>Ctrl-L</b> om het scherm leeg te maken.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Typ <b>help</b> voor een overzicht van de beschikbare commando's.</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+182"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Verstuur munten</translation>
</message>
<message>
<location line="+76"/>
<source>Coin Control Features</source>
<translation>Coin controle opties</translation>
</message>
<message>
<location line="+20"/>
<source>Inputs...</source>
<translation>Invoer...</translation>
</message>
<message>
<location line="+7"/>
<source>automatically selected</source>
<translation>automatisch geselecteerd</translation>
</message>
<message>
<location line="+19"/>
<source>Insufficient funds!</source>
<translation>Onvoldoende saldo!</translation>
</message>
<message>
<location line="+77"/>
<source>Quantity:</source>
<translation>Kwantiteit:</translation>
</message>
<message>
<location line="+22"/>
<location line="+35"/>
<source>0</source>
<translation>0</translation>
</message>
<message>
<location line="-19"/>
<source>Bytes:</source>
<translation>Bytes:</translation>
</message>
<message>
<location line="+51"/>
<source>Amount:</source>
<translation>Bedrag:</translation>
</message>
<message>
<location line="+22"/>
<location line="+86"/>
<location line="+86"/>
<location line="+32"/>
<source>0.00 hack</source>
<translation>0.00 hack</translation>
</message>
<message>
<location line="-191"/>
<source>Priority:</source>
<translation>Prioriteit:</translation>
</message>
<message>
<location line="+19"/>
<source>medium</source>
<translation>gemiddeld</translation>
</message>
<message>
<location line="+32"/>
<source>Fee:</source>
<translation>Vergoeding:</translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation>Lage uitvoer:</translation>
</message>
<message>
<location line="+19"/>
<source>no</source>
<translation>nee</translation>
</message>
<message>
<location line="+32"/>
<source>After Fee:</source>
<translation>Na vergoeding:</translation>
</message>
<message>
<location line="+35"/>
<source>Change</source>
<translation>Wisselgeld</translation>
</message>
<message>
<location line="+50"/>
<source>custom change address</source>
<translation>aangepast wisselgeldadres</translation>
</message>
<message>
<location line="+106"/>
<source>Send to multiple recipients at once</source>
<translation>Verstuur aan verschillende ontvangers ineens</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>Voeg &Ontvanger Toe</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>Verwijder alles in de invulvelden</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>Verwijder &Alles</translation>
</message>
<message>
<location line="+28"/>
<source>Balance:</source>
<translation>Saldo:</translation>
</message>
<message>
<location line="+16"/>
<source>123.456 hack</source>
<translation>123.456 hack</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source><|fim▁hole|> </message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>&Verstuur</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-173"/>
<source>Enter a ChiliCoin address (e.g. PXkRZYXwNDxsgvMmtE3M7rXJKDohbf54Pm)</source>
<translation>Voer een ChiliCoin adres in (bijv. PXkRZYXwNDxsgvMmtE3M7rXJKDohbf54Pm)</translation>
</message>
<message>
<location line="+15"/>
<source>Copy quantity</source>
<translation>Kopieer aantal</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Kopieer bedrag</translation>
</message>
<message>
<location line="+1"/>
<source>Copy fee</source>
<translation>Kopieer vergoeding</translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation>Kopieer na vergoeding</translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation>Kopieer bytes</translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation>Kopieer prioriteit</translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation>Kopieer lage uitvoer</translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation>Kopieer wisselgeld</translation>
</message>
<message>
<location line="+86"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> naar %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Bevestig versturen munten</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>Weet je zeker dat je %1 wilt verzenden?</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> en </translation>
</message>
<message>
<location line="+29"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>Het ontvangstadres is niet geldig, controleer uw invoer.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>Het ingevoerde bedrag moet groter zijn dan 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>Het bedrag overschrijdt uw saldo.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>Het totaal overschrijdt uw saldo wanneer de %1 transactiekosten worden meegerekend.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Dubbel adres gevonden; u kunt per verstuurtransactie slechts eenmaal naar hetzelfde adres verzenden.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed.</source>
<translation>Fout: Aanmaken van transactie mislukt.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Fout: De transactie werd geweigerd. Dit kan gebeuren als sommige munten in je portemonnee al uitgegeven zijn, bijvoorbeeld als je een kopie van wallet.dat gebruikte en de munten in de kopie wel uitgegeven zijn maar hier niet als uitgegeven gemarkeerd staan.</translation>
</message>
<message>
<location line="+251"/>
<source>WARNING: Invalid ChiliCoin address</source>
<translation>WAARSCHUWING: Ongeldig ChiliCoin adres</translation>
</message>
<message>
<location line="+13"/>
<source>(no label)</source>
<translation>(geen label)</translation>
</message>
<message>
<location line="+4"/>
<source>WARNING: unknown change address</source>
<translation>WAARSCHUWING: onbekend wisselgeldadres</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>Formulier</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>Bedra&g:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>Betaal &Aan:</translation>
</message>
<message>
<location line="+24"/>
<location filename="../sendcoinsentry.cpp" line="+25"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Vul een label in voor dit adres om het toe te voegen aan uw adresboek</translation>
</message>
<message>
<location line="+9"/>
<source>&Label:</source>
<translation>&Label:</translation>
</message>
<message>
<location line="+18"/>
<source>The address to send the payment to (e.g. PXkRZYXwNDxsgvMmtE3M7rXJKDohbf54Pm)</source>
<translation>Het adres om de betaling naar te versturen (bijv. PXkRZYXwNDxsgvMmtE3M7rXJKDohbf54Pm)</translation>
</message>
<message>
<location line="+10"/>
<source>Choose address from address book</source>
<translation>Kies adres uit adresboek</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Plak adres vanuit klembord</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>Verwijder deze ontvanger</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a ChiliCoin address (e.g. PXkRZYXwNDxsgvMmtE3M7rXJKDohbf54Pm)</source>
<translation>Voer een ChiliCoin adres in (bijv. PXkRZYXwNDxsgvMmtE3M7rXJKDohbf54Pm)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>Handtekeningen - Onderteken een bericht / Verifiëer een handtekening</translation>
</message>
<message>
<location line="+13"/>
<location line="+124"/>
<source>&Sign Message</source>
<translation>O&nderteken Bericht</translation>
</message>
<message>
<location line="-118"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>U kunt berichten ondertekenen met een van uw adressen om te bewijzen dat u dit adres bezit. Pas op dat u geen onduidelijke dingen ondertekent, want phishingaanvallen zouden u kunnen misleiden om zo uw identiteit te stelen. Onderteken alleen berichten waarmee u het volledig eens bent.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. PXkRZYXwNDxsgvMmtE3M7rXJKDohbf54Pm)</source>
<translation>Het adres om het bericht te ondertekenen (bijv. PXkRZYXwNDxsgvMmtE3M7rXJKDohbf54Pm) </translation>
</message>
<message>
<location line="+10"/>
<location line="+203"/>
<source>Choose an address from the address book</source>
<translation>Kies een adres uit het adresboek</translation>
</message>
<message>
<location line="-193"/>
<location line="+203"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-193"/>
<source>Paste address from clipboard</source>
<translation>Plak adres vanuit klembord</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Typ hier het bericht dat u wilt ondertekenen</translation>
</message>
<message>
<location line="+24"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Kopieer de huidige handtekening naar het systeemklembord</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this ChiliCoin address</source>
<translation>Onderteken het bericht om te bewijzen dat u dit ChiliCoin adres bezit.</translation>
</message>
<message>
<location line="+17"/>
<source>Reset all sign message fields</source>
<translation>Wis alle 'onderteken bericht' velden</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>Verwijder &Alles</translation>
</message>
<message>
<location line="-87"/>
<location line="+70"/>
<source>&Verify Message</source>
<translation>&Verifiëer Bericht</translation>
</message>
<message>
<location line="-64"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>Voer het ondertekenende adres, bericht en handtekening hieronder in (let erop dat u nieuwe regels, spaties en tabs juist overneemt) om de handtekening te verifiëren. Let erop dat u niet meer uit het bericht interpreteert dan er daadwerkelijk staat, om te voorkomen dat u wordt misleid in een man-in-the-middle-aanval.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. PXkRZYXwNDxsgvMmtE3M7rXJKDohbf54Pm)</source>
<translation>Het adres waarmee het bericht is ondertekend (bijv. PXkRZYXwNDxsgvMmtE3M7rXJKDohbf54Pm)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified ChiliCoin address</source>
<translation>Verifieer het bericht om zeker te zijn dat het is ondertekend met het opgegeven ChiliCoin adres</translation>
</message>
<message>
<location line="+17"/>
<source>Reset all verify message fields</source>
<translation>Wis alle 'verifieer bericht' velden</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a ChiliCoin address (e.g. PXkRZYXwNDxsgvMmtE3M7rXJKDohbf54Pm)</source>
<translation>Voer een ChiliCoin adres in (bijv. PXkRZYXwNDxsgvMmtE3M7rXJKDohbf54Pm)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>Klik "Onderteken Bericht" om de handtekening te genereren</translation>
</message>
<message>
<location line="+3"/>
<source>Enter ChiliCoin signature</source>
<translation>Voer ChiliCoin handtekening in</translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>Het opgegeven adres is ongeldig.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>Controleer s.v.p. het adres en probeer het opnieuw.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>Het opgegeven adres verwijst niet naar een sleutel.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Portemonnee-ontsleuteling is geannuleerd</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>Geheime sleutel voor het ingevoerde adres is niet beschikbaar.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>Ondertekenen van het bericht is mislukt.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Bericht ondertekend.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>De handtekening kon niet worden gedecodeerd.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>Controleer s.v.p. de handtekening en probeer het opnieuw.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>De handtekening hoort niet bij het bericht.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>Berichtverificatie mislukt.</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>Bericht correct geverifiëerd.</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+19"/>
<source>Open until %1</source>
<translation>Open tot %1</translation>
</message>
<message numerus="yes">
<location line="-2"/>
<source>Open for %n block(s)</source>
<translation><numerusform>Open voor nog %n blok</numerusform><numerusform>Open voor nog %n blokken</numerusform></translation>
</message>
<message>
<location line="+8"/>
<source>conflicted</source>
<translation>conflicterend</translation>
</message>
<message>
<location line="+2"/>
<source>%1/offline</source>
<translation>%1/offline</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/onbevestigd</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 bevestigingen</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>Status</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, uitgezonden naar %n node</numerusform><numerusform>, uitgezonden naar %n nodes</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Datum</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Bron</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Gegenereerd</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>Van</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>Aan</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation>eigen adres</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>label</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Credit</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>komt tot wasdom na %n nieuw blok</numerusform><numerusform>komt tot wasdom na %n nieuwe blokken</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>niet geaccepteerd</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Debet</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Transactiekosten</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>Netto bedrag</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Bericht</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Opmerking</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>Transactie-ID</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 510 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>Gegenereerde munten moeten 510 blokken rijpen voordat ze kunnen worden uitgegeven. Toen u dit blok genereerde, werd het naar het netwerk uitgezonden om aan de blokketen te worden toegevoegd. Als het niet in de keten wordt opgenomen, verandert de status naar "niet geaccepteerd" en kan het niet worden uitgegeven. Dit kan soms gebeuren als een andere node binnen een paar seconden na die van u een blok genereert.</translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>Debug-informatie</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Transactie</translation>
</message>
<message>
<location line="+5"/>
<source>Inputs</source>
<translation>Inputs</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Bedrag</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>waar</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>onwaar</translation>
</message>
<message>
<location line="-211"/>
<source>, has not been successfully broadcast yet</source>
<translation>, is nog niet met succes uitgezonden</translation>
</message>
<message>
<location line="+35"/>
<source>unknown</source>
<translation>onbekend</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Transactiedetails</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Dit venster laat een uitgebreide beschrijving van de transactie zien</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+226"/>
<source>Date</source>
<translation>Datum</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Type</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adres</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Bedrag</translation>
</message>
<message>
<location line="+60"/>
<source>Open until %1</source>
<translation>Open tot %1</translation>
</message>
<message>
<location line="+12"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Bevestigd (%1 bevestigingen)</translation>
</message>
<message numerus="yes">
<location line="-15"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Open voor nog %n blok</numerusform><numerusform>Open voor nog %n blokken</numerusform></translation>
</message>
<message>
<location line="+6"/>
<source>Offline</source>
<translation>Offline</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed</source>
<translation>Onbevestigd</translation>
</message>
<message>
<location line="+3"/>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation>Bevestigen (%1 van de %2 aanbevolen bevestigingen)</translation>
</message>
<message>
<location line="+6"/>
<source>Conflicted</source>
<translation>Conflicterend</translation>
</message>
<message>
<location line="+3"/>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation>Onvolwassen (%1 bevestigingen, wordt beschikbaar na %2)</translation>
</message>
<message>
<location line="+3"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Dit blok is niet door andere nodes ontvangen en zal waarschijnlijk niet worden geaccepteerd!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Gegenereerd maar niet geaccepteerd</translation>
</message>
<message>
<location line="+42"/>
<source>Received with</source>
<translation>Ontvangen met</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Ontvangen van</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Verzonden aan</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Betaling aan uzelf</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Gedolven</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(nvt)</translation>
</message>
<message>
<location line="+190"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Transactiestatus. Houd de muiscursor boven dit veld om het aantal bevestigingen te laten zien.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Datum en tijd waarop deze transactie is ontvangen.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Type transactie.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>Ontvangend adres van transactie.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Bedrag verwijderd van of toegevoegd aan saldo</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+55"/>
<location line="+16"/>
<source>All</source>
<translation>Alles</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>Vandaag</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Deze week</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Deze maand</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Vorige maand</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Dit jaar</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Bereik...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Ontvangen met</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Verzonden aan</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>Aan uzelf</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Gedolven</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Anders</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Vul adres of label in om te zoeken</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Min. bedrag</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Kopieer adres</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Kopieer label</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Kopieer bedrag</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>Kopieer transactie-ID</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Bewerk label</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>Toon transactiedetails</translation>
</message>
<message>
<location line="+144"/>
<source>Export Transaction Data</source>
<translation>Exporteer transactiegegevens</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Kommagescheiden bestand (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Bevestigd</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Datum</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Type</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Label</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Adres</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Bedrag</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>Fout bij exporteren</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Kan niet schrijven naar bestand %1</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Bereik:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>tot</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+206"/>
<source>Sending...</source>
<translation>Versturen...</translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+33"/>
<source>ChiliCoin version</source>
<translation>ChiliCoin versie</translation>
</message>
<message>
<location line="+1"/>
<source>Usage:</source>
<translation>Gebruik:</translation>
</message>
<message>
<location line="+1"/>
<source>Send command to -server or chilicoind</source>
<translation>Verstuur commando naar -server of chilicoind</translation>
</message>
<message>
<location line="+1"/>
<source>List commands</source>
<translation>Lijst van commando's</translation>
</message>
<message>
<location line="+1"/>
<source>Get help for a command</source>
<translation>Toon hulp voor een commando</translation>
</message>
<message>
<location line="+2"/>
<source>Options:</source>
<translation>Opties:</translation>
</message>
<message>
<location line="+2"/>
<source>Specify configuration file (default: ChiliCoin.conf)</source>
<translation>Specificeer configuratiebestand (standaard: ChiliCoin.conf)</translation>
</message>
<message>
<location line="+1"/>
<source>Specify pid file (default: chilicoind.pid)</source>
<translation>Specificeer pid bestand (standaard: chilicoind.pid)</translation>
</message>
<message>
<location line="+2"/>
<source>Specify wallet file (within data directory)</source>
<translation>Specificeer het portemonnee bestand (vanuit de gegevensmap)</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Stel datamap in</translation>
</message>
<message>
<location line="+2"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Stel databankcachegrootte in in megabytes (standaard: 25)</translation>
</message>
<message>
<location line="+1"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation>Stel database disk log grootte in in megabytes (standaard: 100)</translation>
</message>
<message>
<location line="+6"/>
<source>Listen for connections on <port> (default: 15714 or testnet: 25714)</source>
<translation>Luister voor verbindingen op <poort> (standaard: 15714 of testnet: 25714)</translation>
</message>
<message>
<location line="+1"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Onderhoud maximaal <n> verbindingen naar peers (standaard: 125)</translation>
</message>
<message>
<location line="+3"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Verbind naar een node om adressen van anderen op te halen, en verbreek vervolgens de verbinding</translation>
</message>
<message>
<location line="+1"/>
<source>Specify your own public address</source>
<translation>Specificeer uw eigen publieke adres</translation>
</message>
<message>
<location line="+5"/>
<source>Bind to given address. Use [host]:port notation for IPv6</source>
<translation>Koppel aan gegeven adres. Gebruik [host]:poort notatie voor IPv6</translation>
</message>
<message>
<location line="+2"/>
<source>Stake your coins to support network and gain reward (default: 1)</source>
<translation>Stake uw munten om het netwerk te ondersteunen en ontvang hiervoor een beloning (standaard: 1)</translation>
</message>
<message>
<location line="+5"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Drempel om verbinding te verbreken naar zich misdragende peers (standaard: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Aantal seconden dat zich misdragende peers niet opnieuw mogen verbinden (standaard: 86400)</translation>
</message>
<message>
<location line="-44"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>Er is een fout opgetreden tijdens het instellen van de inkomende RPC-poort %u op IPv4: %s</translation>
</message>
<message>
<location line="+51"/>
<source>Detach block and address databases. Increases shutdown time (default: 0)</source>
<translation>Ontkoppel blok en adressenbestanden. Verhoogt shutdown tijd (standaard: 0)</translation>
</message>
<message>
<location line="+109"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Fout: De transactie is geweigerd. Dit kan gebeuren als sommige munten in uw portemonnee al zijn uitgegeven, bijvoorbeeld als u een kopie van wallet.dat gebruikte en munten in de kopie zijn uitgegeven maar hier niet als uitgegeven zijn gemarkeerd.</translation>
</message>
<message>
<location line="-5"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source>
<translation>Fout: Deze transactie vereist transactiekosten van ten minste %s vanwege het bedrag, de complexiteit of het gebruik van recent ontvangen gelden</translation>
</message>
<message>
<location line="-87"/>
<source>Listen for JSON-RPC connections on <port> (default: 15715 or testnet: 25715)</source>
<translation>Wacht op JSON-RPC-connecties op <poort> (standaard: 15715 of testnet: 25715) </translation>
</message>
<message>
<location line="-11"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Aanvaard commandoregel- en JSON-RPC-commando's</translation>
</message>
<message>
<location line="+101"/>
<source>Error: Transaction creation failed </source>
<translation>Fout: Creëren van transactie mislukt.</translation>
</message>
<message>
<location line="-5"/>
<source>Error: Wallet locked, unable to create transaction </source>
<translation>Fout: Portemonnee is op slot, niet mogelijk een transactie te creëren.</translation>
</message>
<message>
<location line="-8"/>
<source>Importing blockchain data file.</source>
<translation>Importeren van blokketen data bestand.</translation>
</message>
<message>
<location line="+1"/>
<source>Importing bootstrap blockchain data file.</source>
<translation>Importeren van bootstrap blokketen data bestand.</translation>
</message>
<message>
<location line="-88"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Draai in de achtergrond als daemon en aanvaard commando's</translation>
</message>
<message>
<location line="+1"/>
<source>Use the test network</source>
<translation>Gebruik het testnetwerk</translation>
</message>
<message>
<location line="-24"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Accepteer verbindingen van buitenaf (standaard: 1 als geen -proxy of -connect is opgegeven)</translation>
</message>
<message>
<location line="-38"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>Er is een fout opgetreden tijdens het instellen van de inkomende RPC-poort %u op IPv6, terugval naar IPv4: %s</translation>
</message>
<message>
<location line="+117"/>
<source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source>
<translation>Fout bij het initialiseren van de database omgeving %s! Maak om te herstellen een BACKUP VAN DIE MAP en verwijder er vervolgens alles uit behalve wallet.dat.</translation>
</message>
<message>
<location line="-20"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation>Stel maximale grootte van high-priority/low-fee transacties in bytes (standaard: 27000)</translation>
</message>
<message>
<location line="+11"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Waarschuwing: -paytxfee is zeer hoog ingesteld. Dit zijn de transactiekosten die u betaalt bij het versturen van een transactie.</translation>
</message>
<message>
<location line="+61"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong ChiliCoin will not work properly.</source>
<translation>Waarschuwing: Controleer of de datum en tijd van de computer juist zijn! Als uw klok verkeerd staat, zal ChiliCoin niet goed werken.</translation>
</message>
<message>
<location line="-31"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>Waarschuwing: Fout bij het lezen van wallet.dat! Alle sleutels zijn in goede orde uitgelezen, maar transactiedata of adresboeklemma's zouden kunnen ontbreken of fouten bevatten.</translation>
</message>
<message>
<location line="-18"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>Waarschuwing: wallet.dat is corrupt, data is veiliggesteld! Originele wallet.dat is opgeslagen als wallet.{tijdstip}.bak in %s; als uw balans of transacties incorrect zijn dient u een backup terug te zetten.</translation>
</message>
<message>
<location line="-30"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>Poog de geheime sleutels uit een corrupt wallet.dat bestand terug te halen</translation>
</message>
<message>
<location line="+4"/>
<source>Block creation options:</source>
<translation>Blokcreatie-opties:</translation>
</message>
<message>
<location line="-62"/>
<source>Connect only to the specified node(s)</source>
<translation>Verbind alleen naar de gespecificeerde node(s)</translation>
</message>
<message>
<location line="+4"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Ontdek eigen IP-adres (standaard: 1 als er wordt geluisterd en geen -externalip is opgegeven)</translation>
</message>
<message>
<location line="+94"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Mislukt om op welke poort dan ook te luisteren. Gebruik -listen=0 als u dit wilt.</translation>
</message>
<message>
<location line="-90"/>
<source>Find peers using DNS lookup (default: 1)</source>
<translation>Zoek peers door middel van DNS lookup (standaard: 1)</translation>
</message>
<message>
<location line="+5"/>
<source>Sync checkpoints policy (default: strict)</source>
<translation>Sync checkpoints beleid (standaard: strikt)</translation>
</message>
<message>
<location line="+83"/>
<source>Invalid -tor address: '%s'</source>
<translation>Ongeldig -tor adres: '%s'</translation>
</message>
<message>
<location line="+4"/>
<source>Invalid amount for -reservebalance=<amount></source>
<translation>Ongeldig bedrag voor -reservebalance=<bedrag></translation>
</message>
<message>
<location line="-82"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>Maximum per-connectie ontvangstbuffer, <n>*1000 bytes (standaard: 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>Maximum per-connectie zendbuffer, <n>*1000 bytes (standaard: 1000)</translation>
</message>
<message>
<location line="-16"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>Verbind alleen naar nodes in netwerk <net> (IPv4, IPv6 of Tor)</translation>
</message>
<message>
<location line="+28"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation>Geef extra debuginformatie weer. Impliceert alle andere -debug* opties</translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation>Geef extra netwerk debug informatie weer</translation>
</message>
<message>
<location line="+1"/>
<source>Prepend debug output with timestamp</source>
<translation>Voeg een tijdstempel toe aan debug output</translation>
</message>
<message>
<location line="+35"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation>SSL-opties: (zie de Bitcoin wiki voor SSL-instructies)</translation>
</message>
<message>
<location line="-74"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation>Selecteer de versie van socks proxy (4-5, standaard: 5)</translation>
</message>
<message>
<location line="+41"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Stuur trace/debug-info naar de console in plaats van het debug.log bestand</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>Stuur trace/debug info naar de debugger</translation>
</message>
<message>
<location line="+28"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation>Stel maximale blokgrootte in bytes in (standaard: 250000)</translation>
</message>
<message>
<location line="-1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>Stel minimum blokgrootte in in bytes (standaard: 0)</translation>
</message>
<message>
<location line="-29"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Verklein debug.log-bestand bij het opstarten van de client (standaard: 1 als geen -debug)</translation>
</message>
<message>
<location line="-42"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Specificeer de time-outtijd in milliseconden (standaard: 5000)</translation>
</message>
<message>
<location line="+109"/>
<source>Unable to sign checkpoint, wrong checkpointkey?
</source>
<translation>Kan checkpoint niet ondertekenen, verkeerde checkpoint sleutel?
</translation>
</message>
<message>
<location line="-80"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Gebruik UPnP om de luisterende poort te mappen (standaard: 0)</translation>
</message>
<message>
<location line="-1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Gebruik UPnP om de luisterende poort te mappen (standaard: 1 als er wordt geluisterd)</translation>
</message>
<message>
<location line="-25"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation>Gebruik proxy om tor verborgen diensten te bereiken (standaard: zelfde als -proxy)</translation>
</message>
<message>
<location line="+42"/>
<source>Username for JSON-RPC connections</source>
<translation>Gebruikersnaam voor JSON-RPC-verbindingen</translation>
</message>
<message>
<location line="+47"/>
<source>Verifying database integrity...</source>
<translation>Database integriteit wordt geverifieerd...</translation>
</message>
<message>
<location line="+57"/>
<source>WARNING: syncronized checkpoint violation detected, but skipped!</source>
<translation>WAARSCHUWING: schending van een gesynchroniseerd checkpoint gedetecteerd, maar overgeslagen!</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: Disk space is low!</source>
<translation>Waarschuwing: De harde schijf raakt vol!</translation>
</message>
<message>
<location line="-2"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>Waarschuwing: Deze versie is verouderd, een upgrade is vereist!</translation>
</message>
<message>
<location line="-48"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>wallet.dat corrupt, veiligstellen mislukt</translation>
</message>
<message>
<location line="-54"/>
<source>Password for JSON-RPC connections</source>
<translation>Wachtwoord voor JSON-RPC-verbindingen</translation>
</message>
<message>
<location line="-84"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=ChiliCoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "ChiliCoin Alert" [email protected]
</source>
<translation>%s, u moet een rpcpassword instellen in het configuratiebestand:
%s
Het wordt aanbevolen het volgende willekeurige wachtwoord te gebruiken:
rpcuser=ChiliCoinrpc
rpcpassword=%s
(u hoeft dit wachtwoord niet te onthouden)
De gebruikersnaam en het wachtwoord MOGEN NIET hetzelfde zijn.
Als het bestand niet bestaat, maak het dan aan met bestandsrechten waarbij alleen de eigenaar leesrechten heeft.
Het wordt ook aanbevolen alertnotify in te stellen zodat u bij problemen een melding ontvangt;
bijvoorbeeld: alertnotify=echo %%s | mail -s "ChiliCoin Alert" [email protected]</translation>
</message>
<message>
<location line="+51"/>
<source>Find peers using internet relay chat (default: 0)</source>
<translation>Zoek peers door gebruik van Internet Relay Chat (standaard: 0)</translation>
</message>
<message>
<location line="+5"/>
<source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source>
<translation>Synchroniseer tijd met andere connecties. Uitschakelen als de tijd op uw systeem nauwkeurig is bijv. synchroniseren met NTP (standaard: 1)</translation>
</message>
<message>
<location line="+15"/>
<source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source>
<translation>Negeer bij het maken van transacties invoer met een waarde lager dan dit (standaard: 0.01)</translation>
</message>
<message>
<location line="+16"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>Sta JSON-RPC verbindingen van opgegeven IP-adres toe</translation>
</message>
<message>
<location line="+1"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Verstuur commando's naar proces dat op <ip> draait (standaard: 127.0.0.1)</translation>
</message>
<message>
<location line="+1"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Voer commando uit zodra het beste blok verandert (%s in cmd wordt vervangen door blockhash)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Voer opdracht uit zodra een portemonneetransactie verandert (%s in cmd wordt vervangen door TxID)</translation>
</message>
<message>
<location line="+3"/>
<source>Require a confirmations for change (default: 0)</source>
<translation>Vereis bevestigingen voor wisselgeld (standaard: 0)</translation>
</message>
<message>
<location line="+1"/>
<source>Enforce transaction scripts to use canonical PUSH operators (default: 1)</source>
<translation>Dwing transactiescripts om canonieke PUSH operatoren te gebruiken (standaard: 1)</translation>
</message>
<message>
<location line="+2"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation>Voer opdracht uit zodra een relevante waarschuwing wordt ontvangen (%s in cmd wordt vervangen door bericht)</translation>
</message>
<message>
<location line="+3"/>
<source>Upgrade wallet to latest format</source>
<translation>Vernieuw portemonnee naar nieuwste versie</translation>
</message>
<message>
<location line="+1"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Stel sleutelpoelgrootte in op <n> (standaard: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Doorzoek de blokketen op ontbrekende portemonnee-transacties</translation>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 2500, 0 = all)</source>
<translation>Hoeveel blokken controleren bij opstarten (standaard: 2500, 0= alles)</translation>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation>Hoe grondig de blokverificatie is (0-6, standaard: 1)</translation>
</message>
<message>
<location line="+1"/>
<source>Imports blocks from external blk000?.dat file</source>
<translation>Importeer blokken van extern blk000?.dat bestand</translation>
</message>
<message>
<location line="+8"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Gebruik OpenSSL (https) voor JSON-RPC-verbindingen</translation>
</message>
<message>
<location line="+1"/>
<source>Server certificate file (default: server.cert)</source>
<translation>Certificaat-bestand voor server (standaard: server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Geheime sleutel voor server (standaard: server.pem)</translation>
</message>
<message>
<location line="+1"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>Aanvaardbare versleutelingsmethodes (standaard: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation>
</message>
<message>
<location line="+53"/>
<source>Error: Wallet unlocked for staking only, unable to create transaction.</source>
<translation>Fout: Portemonnee ontgrendeld voor alleen staking, niet in staat om de transactie te maken.</translation>
</message>
<message>
<location line="+18"/>
<source>WARNING: Invalid checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source>
<translation>WAARSCHUWING: Ongeldig controlepunt gevonden! Weergegeven transacties kloppen mogelijk niet! Mogelijk moet u upgraden of de ontwikkelaars waarschuwen.</translation>
</message>
<message>
<location line="-158"/>
<source>This help message</source>
<translation>Dit helpbericht</translation>
</message>
<message>
<location line="+95"/>
<source>Wallet %s resides outside data directory %s.</source>
<translation>Portemonnee %s bevindt zich buiten de datamap %s.</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot obtain a lock on data directory %s. ChiliCoin is probably already running.</source>
<translation>Kan een slot op data directory %s niet verkrijgen. ChiliCoin wordt waarschijnlijk al uitgevoerd.</translation>
</message>
<message>
<location line="-98"/>
<source>ChiliCoin</source>
<translation>ChiliCoin</translation>
</message>
<message>
<location line="+140"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>Niet in staat om aan %s te binden op deze computer (bind gaf error %d, %s)</translation>
</message>
<message>
<location line="-130"/>
<source>Connect through socks proxy</source>
<translation>Verbind door socks proxy</translation>
</message>
<message>
<location line="+3"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Sta DNS-naslag toe voor -addnode, -seednode en -connect</translation>
</message>
<message>
<location line="+122"/>
<source>Loading addresses...</source>
<translation>Adressen aan het laden...</translation>
</message>
<message>
<location line="-15"/>
<source>Error loading blkindex.dat</source>
<translation>Fout bij laden van blkindex.dat</translation>
</message>
<message>
<location line="+2"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Fout bij laden wallet.dat: Portemonnee corrupt</translation>
</message>
<message>
<location line="+4"/>
<source>Error loading wallet.dat: Wallet requires newer version of ChiliCoin</source>
<translation>Fout bij laden van wallet.dat: Portemonnee vereist een nieuwere versie van ChiliCoin</translation>
</message>
<message>
<location line="+1"/>
<source>Wallet needed to be rewritten: restart ChiliCoin to complete</source>
<translation>Portemonnee moet herschreven worden: herstart ChiliCoin om te voltooien</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat</source>
<translation>Fout bij laden wallet.dat</translation>
</message>
<message>
<location line="-16"/>
<source>Invalid -proxy address: '%s'</source>
<translation>Ongeldig -proxy adres: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>Onbekend netwerk gespecificeerd in -onlynet: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>Onbekende -socks proxyversie aangegeven: %i</translation>
</message>
<message>
<location line="+4"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>Kan -bind adres niet herleiden: '%s'</translation>
</message>
<message>
<location line="+2"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>Kan -externalip adres niet herleiden: '%s'</translation>
</message>
<message>
<location line="-24"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>Ongeldig bedrag voor -paytxfee=<bedrag>: '%s'</translation>
</message>
<message>
<location line="+44"/>
<source>Error: could not start node</source>
<translation>Fout: kan node niet starten</translation>
</message>
<message>
<location line="+11"/>
<source>Sending...</source>
<translation>Versturen...</translation>
</message>
<message>
<location line="+5"/>
<source>Invalid amount</source>
<translation>Ongeldig bedrag</translation>
</message>
<message>
<location line="+1"/>
<source>Insufficient funds</source>
<translation>Ontoereikend saldo</translation>
</message>
<message>
<location line="-34"/>
<source>Loading block index...</source>
<translation>Blokindex aan het laden...</translation>
</message>
<message>
<location line="-103"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Voeg een node toe om mee te verbinden en probeer de verbinding open te houden</translation>
</message>
<message>
<location line="+122"/>
<source>Unable to bind to %s on this computer. ChiliCoin is probably already running.</source>
<translation>Niet in staat om aan %s te binden op deze computer. ChiliCoin wordt waarschijnlijk al uitgevoerd.</translation>
</message>
<message>
<location line="-97"/>
<source>Fee per KB to add to transactions you send</source>
<translation>Vergoeding per KB toe te voegen aan de transacties die u verzendt</translation>
</message>
<message>
<location line="+55"/>
<source>Invalid amount for -mininput=<amount>: '%s'</source>
<translation>Ongeldig bedrag voor -mininput=<bedrag>: '%s'</translation>
</message>
<message>
<location line="+25"/>
<source>Loading wallet...</source>
<translation>Portemonnee aan het laden...</translation>
</message>
<message>
<location line="+8"/>
<source>Cannot downgrade wallet</source>
<translation>Kan portemonnee niet downgraden</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot initialize keypool</source>
<translation>Kan keypool niet initialiseren</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot write default address</source>
<translation>Kan standaardadres niet schrijven</translation>
</message>
<message>
<location line="+1"/>
<source>Rescanning...</source>
<translation>Blokketen aan het doorzoeken...</translation>
</message>
<message>
<location line="+5"/>
<source>Done loading</source>
<translation>Klaar met laden</translation>
</message>
<message>
<location line="-167"/>
<source>To use the %s option</source>
<translation>Om de %s optie te gebruiken</translation>
</message>
<message>
<location line="+14"/>
<source>Error</source>
<translation>Fout</translation>
</message>
<message>
<location line="+6"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>U dient rpcpassword=<wachtwoord> in te stellen in het configuratiebestand:
%s
Als het bestand niet bestaat, maak het dan aan met bestandsrechten waarbij alleen de eigenaar leesrechten heeft.</translation>
</message>
</context>
</TS><|fim▁end|>
|
<translation>Bevestig de verstuuractie</translation>
|
<|file_name|>error.rs<|end_file_name|><|fim▁begin|>use std::error::Error;
use std::fmt::{Display, Formatter, Result};
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct NotConnected;
impl Display for NotConnected {
fn fmt(&self, f: &mut Formatter) -> Result {
// Write a literal message; formatting `self` with "{}" here would
// recurse back into this Display impl and overflow the stack.
write!(f, "Microbrute not connected.")
}<|fim▁hole|>
impl Error for NotConnected {
fn description(&self) -> &str {
"Microbrute not connected."
}
}<|fim▁end|>
|
}
|
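A minimal usage sketch for the error type above (illustrative only, not part of the dataset record): the connect() helper is invented for the example; only NotConnected and its Display impl come from the file.

fn connect() -> std::result::Result<(), NotConnected> {
    // Hypothetical helper that always fails, just to exercise the error type.
    Err(NotConnected)
}

fn main() {
    if let Err(e) = connect() {
        // Prints via the Display impl defined in the record above.
        println!("error: {}", e);
    }
}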
<|file_name|>ICustomDrawGuiDisplay.java<|end_file_name|><|fim▁begin|>package joshie.progression.api.gui;
<|fim▁hole|>import net.minecraftforge.fml.relauncher.SideOnly;
/** Implement this on rewards, triggers, filters, conditions,
* if you wish to draw something special on them, other than default fields. */
public interface ICustomDrawGuiDisplay {
@SideOnly(Side.CLIENT)
public void drawDisplay(IDrawHelper helper, int renderX, int renderY, int mouseX, int mouseY);
}<|fim▁end|>
|
import net.minecraftforge.fml.relauncher.Side;
|
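A hypothetical implementor of the interface above, sketched only to show the intended contract. MyReward is an invented name, and IDrawHelper's drawing methods are not shown in the record, so the body carries comments rather than real draw calls.

public class MyReward implements ICustomDrawGuiDisplay {
    @SideOnly(Side.CLIENT)
    @Override
    public void drawDisplay(IDrawHelper helper, int renderX, int renderY, int mouseX, int mouseY) {
        // Client-side only: draw custom decorations at (renderX, renderY),
        // optionally reacting to the cursor position (mouseX, mouseY).
    }
}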
<|file_name|>signatures.go<|end_file_name|><|fim▁begin|>package ct
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/asn1"
"encoding/pem"
"errors"
//"flag"
"fmt"
"log"
"math/big"
)
var allowVerificationWithNonCompliantKeys = false //flag.Bool("allow_verification_with_non_compliant_keys", false,
//"Allow a SignatureVerifier to use keys which are technically non-compliant with RFC6962.")
// PublicKeyFromPEM parses a PEM formatted block and returns the public key contained within and any remaining unread bytes, or an error.
func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) {
p, rest := pem.Decode(b)
if p == nil {
return nil, [sha256.Size]byte{}, rest, fmt.Errorf("no PEM block found in %s", string(b))
}
k, err := x509.ParsePKIXPublicKey(p.Bytes)
return k, sha256.Sum256(p.Bytes), rest, err
}
// SignatureVerifier can verify signatures on SCTs and STHs
type SignatureVerifier struct {
pubKey crypto.PublicKey
}
// NewSignatureVerifier creates a new SignatureVerifier using the passed in PublicKey.
func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) {
switch pkType := pk.(type) {
case *rsa.PublicKey:
if pkType.N.BitLen() < 2048 {
e := fmt.Errorf("public key is RSA with < 2048 bits (size:%d)", pkType.N.BitLen())
if !(allowVerificationWithNonCompliantKeys) {
return nil, e
}
log.Printf("WARNING: %v", e)<|fim▁hole|> case *ecdsa.PublicKey:
params := *(pkType.Params())
if params != *elliptic.P256().Params() {
e := fmt.Errorf("public key is ECDSA, but not on the P256 curve")
if !(allowVerificationWithNonCompliantKeys) {
return nil, e
}
log.Printf("WARNING: %v", e)
}
default:
return nil, fmt.Errorf("Unsupported public key type %v", pkType)
}
return &SignatureVerifier{
pubKey: pk,
}, nil
}
// verifySignature verifies that the passed in signature over data was created by our PublicKey.
// Currently, only SHA256 is supported as a HashAlgorithm, and only ECDSA and RSA signatures are supported.
func (s SignatureVerifier) verifySignature(data []byte, sig DigitallySigned) error {
if sig.HashAlgorithm != SHA256 {
return fmt.Errorf("unsupported HashAlgorithm in signature: %v", sig.HashAlgorithm)
}
hasherType := crypto.SHA256
hasher := hasherType.New()
if _, err := hasher.Write(data); err != nil {
return fmt.Errorf("failed to write to hasher: %v", err)
}
hash := hasher.Sum([]byte{})
switch sig.SignatureAlgorithm {
case RSA:
rsaKey, ok := s.pubKey.(*rsa.PublicKey)
if !ok {
return fmt.Errorf("cannot verify RSA signature with %T key", s.pubKey)
}
if err := rsa.VerifyPKCS1v15(rsaKey, hasherType, hash, sig.Signature); err != nil {
return fmt.Errorf("failed to verify rsa signature: %v", err)
}
case ECDSA:
ecdsaKey, ok := s.pubKey.(*ecdsa.PublicKey)
if !ok {
return fmt.Errorf("cannot verify ECDSA signature with %T key", s.pubKey)
}
var ecdsaSig struct {
R, S *big.Int
}
rest, err := asn1.Unmarshal(sig.Signature, &ecdsaSig)
if err != nil {
return fmt.Errorf("failed to unmarshal ECDSA signature: %v", err)
}
if len(rest) != 0 {
log.Printf("Garbage following signature %v", rest)
}
if !ecdsa.Verify(ecdsaKey, hash, ecdsaSig.R, ecdsaSig.S) {
return errors.New("failed to verify ecdsa signature")
}
default:
return fmt.Errorf("unsupported signature type %v", sig.SignatureAlgorithm)
}
return nil
}
// VerifySCTSignature verifies that the SCT's signature is valid for the given LogEntry
func (s SignatureVerifier) VerifySCTSignature(sct SignedCertificateTimestamp, entry LogEntry) error {
sctData, err := SerializeSCTSignatureInput(sct, entry)
if err != nil {
return err
}
return s.verifySignature(sctData, sct.Signature)
}
// VerifySTHSignature verifies that the STH's signature is valid.
func (s SignatureVerifier) VerifySTHSignature(sth SignedTreeHead) error {
sthData, err := SerializeSTHSignatureInput(sth)
if err != nil {
return err
}
return s.verifySignature(sthData, sth.TreeHeadSignature)
}<|fim▁end|>
|
}
|
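A usage sketch for the verifier above (assumptions: pemBytes holds a CT log's public key in PEM form and sth is an already-populated SignedTreeHead; both are stand-ins, everything else comes from the file).

func verifySTH(pemBytes []byte, sth SignedTreeHead) error {
    pk, _, _, err := PublicKeyFromPEM(pemBytes)
    if err != nil {
        return err
    }
    v, err := NewSignatureVerifier(pk)
    if err != nil {
        return err
    }
    // Rejects the tree head unless its signature checks out against the log key.
    return v.VerifySTHSignature(sth)
}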
<|file_name|>email_errors.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
import logging
import traceback
import time
from google.appengine.api import app_identity, mail, capabilities
from google.appengine.runtime import DeadlineExceededError
from tekton.gae.middleware import Middleware
from tekton.router import PathNotFound
def get_apis_statuses(e):
if not isinstance(e, DeadlineExceededError):
return {}
t1 = time.time()
statuses = {
'blobstore': capabilities.CapabilitySet('blobstore').is_enabled(),
'datastore_v3': capabilities.CapabilitySet('datastore_v3').is_enabled(),
'datastore_v3_write': capabilities.CapabilitySet('datastore_v3', ['write']).is_enabled(),
'images': capabilities.CapabilitySet('images').is_enabled(),
'mail': capabilities.CapabilitySet('mail').is_enabled(),
'memcache': capabilities.CapabilitySet('memcache').is_enabled(),
'taskqueue': capabilities.CapabilitySet('taskqueue').is_enabled(),
'urlfetch': capabilities.CapabilitySet('urlfetch').is_enabled(),
}
t2 = time.time()
statuses['time'] = t2 - t1
return statuses
def send_error_to_admins(settings, exception, handler, render, template):
tb = traceback.format_exc()
errmsg = exception.message
logging.error(errmsg)
logging.error(tb)
handler.response.write(render(template))
appid = app_identity.get_application_id()
subject = 'ERROR in %s: [%s] %s' % (appid, handler.request.path, errmsg)
body = """
------------- request ------------
%s
----------------------------------
------------- GET params ---------
%s
----------------------------------
----------- POST params ----------
%s
----------------------------------
----------- traceback ------------
%s
----------------------------------
""" % (handler.request, handler.request.GET, handler.request.POST, tb)
body += 'API statuses = ' + json.dumps(get_apis_statuses(exception), indent=4)
mail.send_mail_to_admins(sender=settings.SENDER_EMAIL,
subject=subject,
body=body)
class EmailMiddleware(Middleware):
def handle_error(self, exception):<|fim▁hole|> send_error_to_admins(settings, exception, self.handler, self.dependencies['_render'],
settings.TEMPLATE_404_ERROR)
else:
self.handler.response.set_status(400)
send_error_to_admins(settings, exception, self.handler, self.dependencies['_render'],
settings.TEMPLATE_400_ERROR)<|fim▁end|>
|
import settings # workaround. See https://github.com/renzon/zenwarch/issues/3
if isinstance(exception, PathNotFound):
self.handler.response.set_status(404)
|
<|file_name|>sanitycheck.py<|end_file_name|><|fim▁begin|># jhbuild - a build script for GNOME 1.x and 2.x
# Copyright (C) 2001-2006 James Henstridge
#
# sanitycheck.py: check whether build environment is sane
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import os<|fim▁hole|>import re
from jhbuild.commands import Command, register_command
from jhbuild.utils.cmds import get_output, check_version
from jhbuild.errors import UsageError, CommandError
def get_aclocal_path():
data = get_output(['aclocal', '--print-ac-dir'])
path = [data[:-1]]
env = os.environ.get('ACLOCAL_FLAGS', '').split()
i = 0
while i < len(env):
if env[i] == '-I':
path.append(env[i+1])
i = i + 2
else:
i = i + 1
return path
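# Illustrative example (paths are hypothetical): with `aclocal --print-ac-dir`
# printing "/usr/share/aclocal\n" and ACLOCAL_FLAGS="-I /opt/share/aclocal",
# get_aclocal_path() returns ['/usr/share/aclocal', '/opt/share/aclocal'].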
def inpath(filename, path):
for dir in path:
if os.path.isfile(os.path.join(dir, filename)):
return True
# also check for filename.exe on Windows
if sys.platform.startswith('win') and os.path.isfile(os.path.join(dir, filename + '.exe')):
return True
return False
class cmd_sanitycheck(Command):
doc = N_('Check that required support tools are available')
name = 'sanitycheck'
usage_args = ''
def run(self, config, options, args, help=None):
if args:
raise UsageError(_('no extra arguments expected'))
# check whether the checkout root and install prefix are writable
if not (os.path.isdir(config.checkoutroot) and
os.access(config.checkoutroot, os.R_OK|os.W_OK|os.X_OK)):
uprint(_('checkout root (%s) is not writable') % config.checkoutroot)
if not (os.path.isdir(config.prefix) and
os.access(config.prefix, os.R_OK|os.W_OK|os.X_OK)):
uprint(_('install prefix (%s) is not writable') % config.prefix)
# check whether various tools are installed
if not check_version(['libtoolize', '--version'],
r'libtoolize \([^)]*\) ([\d.]+)', '1.5'):
uprint(_('%s not found') % 'libtool >= 1.5')
if not check_version(['gettext', '--version'],
r'gettext \([^)]*\) ([\d.]+)', '0.10.40'):
uprint(_('%s not found') % 'gettext >= 0.10.40')
if not check_version(['pkg-config', '--version'],
r'^([\d.]+)', '0.14.0'):
uprint(_('%s not found') % 'pkg-config >= 0.14.0')
if not check_version(['autoconf', '--version'],
r'autoconf \([^)]*\) ([\d.]+)', '2.53'):
uprint(_('%s not found') % 'autoconf >= 2.53')
if not check_version(['automake', '--version'],
r'automake \([^)]*\) ([\d.]+)', '1.10'):
uprint(_('%s not found') % 'automake >= 1.10')
try:
not_in_path = []
path = get_aclocal_path()
macros = ['libtool.m4', 'gettext.m4', 'pkg.m4']
for macro in macros:
                if not inpath(macro, path):
                    uprint(_("aclocal can't see %s macros") % (macro.split('.m4')[0]))
                    if macro not in not_in_path:
                        not_in_path.append(macro)
            if len(not_in_path) > 0:
                # keep the literal template inside _() so gettext can look it up
                uprint(_("Please copy the missing macros (%s) into one of the following paths: %s")
                       % (', '.join(not_in_path), ', '.join(path)))
except CommandError, exc:
uprint(str(exc))
# XML catalog sanity checks
if not os.access('/etc/xml/catalog', os.R_OK):
uprint(_('Could not find XML catalog'))
else:
for (item, name) in [('-//OASIS//DTD DocBook XML V4.1.2//EN',
'DocBook XML DTD V4.1.2'),
('http://docbook.sourceforge.net/release/xsl/current/html/chunk.xsl',
'DocBook XSL Stylesheets')]:
try:
data = get_output(['xmlcatalog', '/etc/xml/catalog', item])
except:
                    uprint(_('Could not find %s in XML catalog') % name)
# Perl modules used by tools such as intltool:
for perlmod in [ 'XML::Parser' ]:
try:
get_output(['perl', '-M%s' % perlmod, '-e', 'exit'])
except:
uprint(_('Could not find the perl module %s') % perlmod)
# check for cvs:
if not inpath('cvs', os.environ['PATH'].split(os.pathsep)):
uprint(_('%s not found') % 'cvs')
# check for svn:
if not inpath('svn', os.environ['PATH'].split(os.pathsep)):
uprint(_('%s not found') % 'svn')
if not (inpath('curl', os.environ['PATH'].split(os.pathsep)) or
inpath('wget', os.environ['PATH'].split(os.pathsep))):
uprint(_('%s or %s not found') % ('curl', 'wget'))
# check for git:
if not inpath('git', os.environ['PATH'].split(os.pathsep)):
uprint(_('%s not found') % 'git')
else:
try:
git_help = os.popen('git --help', 'r').read()
if not 'clone' in git_help:
uprint(_('Installed git program is not the right git'))
else:
if not check_version(['git', '--version'],
r'git version ([\d.]+)', '1.5.6'):
uprint(_('%s not found') % 'git >= 1.5.6')
except:
uprint(_('Could not check git program'))
# check for flex/bison:
if not inpath('flex', os.environ['PATH'].split(os.pathsep)):
uprint(_('%s not found') % 'flex')
if not inpath('bison', os.environ['PATH'].split(os.pathsep)):
uprint(_('%s not found') % 'bison')
if not inpath('xzcat', os.environ['PATH'].split(os.pathsep)):
uprint(_('%s not found') % 'xzcat')
register_command(cmd_sanitycheck)<|fim▁end|>
| |
<|file_name|>slice.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Slice management and manipulation
//!
//! For more details, see `std::slice`.
#![stable(feature = "rust1", since = "1.0.0")]
#![doc(primitive = "slice")]
// How this module is organized.
//
// The library infrastructure for slices is fairly messy. There's
// a lot of stuff defined here. Let's keep it clean.
//
// Since slices don't support inherent methods, all operations
// on them are defined on traits, which are then reexported from
// the prelude for convenience. So there are a lot of traits here.
//
// The layout of this file is thus:
//
// * Slice-specific 'extension' traits and their implementations. This
// is where most of the slice API resides.
// * Implementations of a few common traits with important slice ops.
// * Definitions of a bunch of iterators.
// * Free functions.
// * The `raw` and `bytes` submodules.
// * Boilerplate trait implementations.
use mem::transmute;
use clone::Clone;
use cmp::{Ordering, PartialEq, PartialOrd, Eq, Ord};
use cmp::Ordering::{Less, Equal, Greater};
use cmp;
use default::Default;
use intrinsics::assume;
use iter::*;
use ops::{FnMut, self, Index};
use ops::RangeFull;
use option::Option;
use option::Option::{None, Some};
use result::Result;
use result::Result::{Ok, Err};
use ptr;
use ptr::PtrExt;
use mem;
use mem::size_of;
use marker::{Send, Sized, Sync, self};
use raw::Repr;
// Avoid conflicts with *both* the Slice trait (buggy) and the `slice::raw` module.
use raw::Slice as RawSlice;
//
// Extension traits
//
/// Extension methods for slices.
#[allow(missing_docs)] // docs in libcollections
pub trait SliceExt {
type Item;
fn split_at<'a>(&'a self, mid: usize) -> (&'a [Self::Item], &'a [Self::Item]);
fn iter<'a>(&'a self) -> Iter<'a, Self::Item>;
fn split<'a, P>(&'a self, pred: P) -> Split<'a, Self::Item, P>
where P: FnMut(&Self::Item) -> bool;
fn splitn<'a, P>(&'a self, n: usize, pred: P) -> SplitN<'a, Self::Item, P>
where P: FnMut(&Self::Item) -> bool;
fn rsplitn<'a, P>(&'a self, n: usize, pred: P) -> RSplitN<'a, Self::Item, P>
where P: FnMut(&Self::Item) -> bool;
fn windows<'a>(&'a self, size: usize) -> Windows<'a, Self::Item>;
fn chunks<'a>(&'a self, size: usize) -> Chunks<'a, Self::Item>;
fn get<'a>(&'a self, index: usize) -> Option<&'a Self::Item>;
fn first<'a>(&'a self) -> Option<&'a Self::Item>;
fn tail<'a>(&'a self) -> &'a [Self::Item];
fn init<'a>(&'a self) -> &'a [Self::Item];
fn last<'a>(&'a self) -> Option<&'a Self::Item>;
unsafe fn get_unchecked<'a>(&'a self, index: usize) -> &'a Self::Item;
fn as_ptr(&self) -> *const Self::Item;
fn binary_search_by<F>(&self, f: F) -> Result<usize, usize> where
F: FnMut(&Self::Item) -> Ordering;
fn len(&self) -> usize;
fn is_empty(&self) -> bool { self.len() == 0 }
fn get_mut<'a>(&'a mut self, index: usize) -> Option<&'a mut Self::Item>;
fn as_mut_slice<'a>(&'a mut self) -> &'a mut [Self::Item];
fn iter_mut<'a>(&'a mut self) -> IterMut<'a, Self::Item>;
fn first_mut<'a>(&'a mut self) -> Option<&'a mut Self::Item>;
fn tail_mut<'a>(&'a mut self) -> &'a mut [Self::Item];
fn init_mut<'a>(&'a mut self) -> &'a mut [Self::Item];
fn last_mut<'a>(&'a mut self) -> Option<&'a mut Self::Item>;
fn split_mut<'a, P>(&'a mut self, pred: P) -> SplitMut<'a, Self::Item, P>
where P: FnMut(&Self::Item) -> bool;
fn splitn_mut<P>(&mut self, n: usize, pred: P) -> SplitNMut<Self::Item, P>
where P: FnMut(&Self::Item) -> bool;
fn rsplitn_mut<P>(&mut self, n: usize, pred: P) -> RSplitNMut<Self::Item, P>
where P: FnMut(&Self::Item) -> bool;
fn chunks_mut<'a>(&'a mut self, chunk_size: usize) -> ChunksMut<'a, Self::Item>;
fn swap(&mut self, a: usize, b: usize);
fn split_at_mut<'a>(&'a mut self, mid: usize) -> (&'a mut [Self::Item], &'a mut [Self::Item]);
fn reverse(&mut self);
unsafe fn get_unchecked_mut<'a>(&'a mut self, index: usize) -> &'a mut Self::Item;
fn as_mut_ptr(&mut self) -> *mut Self::Item;
fn position_elem(&self, t: &Self::Item) -> Option<usize> where Self::Item: PartialEq;
fn rposition_elem(&self, t: &Self::Item) -> Option<usize> where Self::Item: PartialEq;
fn contains(&self, x: &Self::Item) -> bool where Self::Item: PartialEq;
fn starts_with(&self, needle: &[Self::Item]) -> bool where Self::Item: PartialEq;
fn ends_with(&self, needle: &[Self::Item]) -> bool where Self::Item: PartialEq;
fn binary_search(&self, x: &Self::Item) -> Result<usize, usize> where Self::Item: Ord;
fn next_permutation(&mut self) -> bool where Self::Item: Ord;
fn prev_permutation(&mut self) -> bool where Self::Item: Ord;
fn clone_from_slice(&mut self, &[Self::Item]) -> usize where Self::Item: Clone;
}
#[unstable(feature = "core")]
impl<T> SliceExt for [T] {
type Item = T;
#[inline]
fn split_at(&self, mid: usize) -> (&[T], &[T]) {
(&self[..mid], &self[mid..])
}
#[inline]
fn iter<'a>(&'a self) -> Iter<'a, T> {
unsafe {
let p = self.as_ptr();
assume(!p.is_null());
if mem::size_of::<T>() == 0 {
Iter {ptr: p,
end: (p as usize + self.len()) as *const T,
_marker: marker::PhantomData}
} else {
Iter {ptr: p,
end: p.offset(self.len() as isize),
_marker: marker::PhantomData}
}
}
}
#[inline]
fn split<'a, P>(&'a self, pred: P) -> Split<'a, T, P> where P: FnMut(&T) -> bool {
Split {
v: self,
pred: pred,
finished: false
}
}
#[inline]
fn splitn<'a, P>(&'a self, n: usize, pred: P) -> SplitN<'a, T, P> where
P: FnMut(&T) -> bool,
{
SplitN {
inner: GenericSplitN {
iter: self.split(pred),
count: n,
invert: false
}
}
}
#[inline]
fn rsplitn<'a, P>(&'a self, n: usize, pred: P) -> RSplitN<'a, T, P> where
P: FnMut(&T) -> bool,
{
RSplitN {
inner: GenericSplitN {
iter: self.split(pred),
count: n,
invert: true
}
}
}
#[inline]
fn windows(&self, size: usize) -> Windows<T> {
assert!(size != 0);
Windows { v: self, size: size }
}
#[inline]
fn chunks(&self, size: usize) -> Chunks<T> {
assert!(size != 0);
Chunks { v: self, size: size }
}
#[inline]
fn get(&self, index: usize) -> Option<&T> {
if index < self.len() { Some(&self[index]) } else { None }
}
#[inline]
fn first(&self) -> Option<&T> {
if self.len() == 0 { None } else { Some(&self[0]) }
}
#[inline]
fn tail(&self) -> &[T] { &self[1..] }
#[inline]
fn init(&self) -> &[T] {
&self[..self.len() - 1]
}
#[inline]
fn last(&self) -> Option<&T> {
if self.len() == 0 { None } else { Some(&self[self.len() - 1]) }
}
#[inline]
unsafe fn get_unchecked(&self, index: usize) -> &T {
transmute(self.repr().data.offset(index as isize))
}
#[inline]
fn as_ptr(&self) -> *const T {
self.repr().data
}
#[unstable(feature = "core")]
fn binary_search_by<F>(&self, mut f: F) -> Result<usize, usize> where
F: FnMut(&T) -> Ordering
{
let mut base : usize = 0;
let mut lim : usize = self.len();
while lim != 0 {
let ix = base + (lim >> 1);
match f(&self[ix]) {
Equal => return Ok(ix),
Less => {
base = ix + 1;
lim -= 1;
}
Greater => ()
}
lim >>= 1;
}
Err(base)
}
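    // Illustrative (values assumed): on the sorted slice [1, 2, 3, 5, 8],
    // `binary_search_by(|x| x.cmp(&5))` returns Ok(3), while searching for 4
    // returns Err(3), the index at which 4 could be inserted to keep order.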
#[inline]
fn len(&self) -> usize { self.repr().len }
#[inline]
fn get_mut(&mut self, index: usize) -> Option<&mut T> {
if index < self.len() { Some(&mut self[index]) } else { None }
}
#[inline]
fn as_mut_slice(&mut self) -> &mut [T] { self }
#[inline]
fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
unsafe {
let self2: &mut [T] = mem::transmute_copy(&self);
(ops::IndexMut::index_mut(self, &ops::RangeTo { end: mid } ),
ops::IndexMut::index_mut(self2, &ops::RangeFrom { start: mid } ))
}
}
#[inline]
fn iter_mut<'a>(&'a mut self) -> IterMut<'a, T> {
unsafe {
let p = self.as_mut_ptr();
assume(!p.is_null());
if mem::size_of::<T>() == 0 {
IterMut {ptr: p,
end: (p as usize + self.len()) as *mut T,
_marker: marker::PhantomData}
} else {
IterMut {ptr: p,
end: p.offset(self.len() as isize),
_marker: marker::PhantomData}
}
}
}
#[inline]
fn last_mut(&mut self) -> Option<&mut T> {
let len = self.len();
if len == 0 { return None; }
Some(&mut self[len - 1])
}
#[inline]
fn first_mut(&mut self) -> Option<&mut T> {
if self.len() == 0 { None } else { Some(&mut self[0]) }
}
#[inline]
fn tail_mut(&mut self) -> &mut [T] {
&mut self[1 ..]
}
#[inline]
fn init_mut(&mut self) -> &mut [T] {
let len = self.len();
&mut self[.. (len - 1)]
}<|fim▁hole|> #[inline]
fn split_mut<'a, P>(&'a mut self, pred: P) -> SplitMut<'a, T, P> where P: FnMut(&T) -> bool {
SplitMut { v: self, pred: pred, finished: false }
}
#[inline]
fn splitn_mut<'a, P>(&'a mut self, n: usize, pred: P) -> SplitNMut<'a, T, P> where
P: FnMut(&T) -> bool
{
SplitNMut {
inner: GenericSplitN {
iter: self.split_mut(pred),
count: n,
invert: false
}
}
}
#[inline]
fn rsplitn_mut<'a, P>(&'a mut self, n: usize, pred: P) -> RSplitNMut<'a, T, P> where
P: FnMut(&T) -> bool,
{
RSplitNMut {
inner: GenericSplitN {
iter: self.split_mut(pred),
count: n,
invert: true
}
}
}
#[inline]
fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<T> {
assert!(chunk_size > 0);
ChunksMut { v: self, chunk_size: chunk_size }
}
#[inline]
fn swap(&mut self, a: usize, b: usize) {
unsafe {
// Can't take two mutable loans from one vector, so instead just cast
// them to their raw pointers to do the swap
let pa: *mut T = &mut self[a];
let pb: *mut T = &mut self[b];
ptr::swap(pa, pb);
}
}
fn reverse(&mut self) {
let mut i: usize = 0;
let ln = self.len();
while i < ln / 2 {
// Unsafe swap to avoid the bounds check in safe swap.
unsafe {
let pa: *mut T = self.get_unchecked_mut(i);
let pb: *mut T = self.get_unchecked_mut(ln - i - 1);
ptr::swap(pa, pb);
}
i += 1;
}
}
#[inline]
unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut T {
transmute((self.repr().data as *mut T).offset(index as isize))
}
#[inline]
fn as_mut_ptr(&mut self) -> *mut T {
self.repr().data as *mut T
}
#[inline]
fn position_elem(&self, x: &T) -> Option<usize> where T: PartialEq {
self.iter().position(|y| *x == *y)
}
#[inline]
fn rposition_elem(&self, t: &T) -> Option<usize> where T: PartialEq {
self.iter().rposition(|x| *x == *t)
}
#[inline]
fn contains(&self, x: &T) -> bool where T: PartialEq {
self.iter().any(|elt| *x == *elt)
}
#[inline]
fn starts_with(&self, needle: &[T]) -> bool where T: PartialEq {
let n = needle.len();
self.len() >= n && needle == &self[..n]
}
#[inline]
fn ends_with(&self, needle: &[T]) -> bool where T: PartialEq {
let (m, n) = (self.len(), needle.len());
m >= n && needle == &self[m-n..]
}
#[unstable(feature = "core")]
fn binary_search(&self, x: &T) -> Result<usize, usize> where T: Ord {
self.binary_search_by(|p| p.cmp(x))
}
#[unstable(feature = "core")]
fn next_permutation(&mut self) -> bool where T: Ord {
// These cases only have 1 permutation each, so we can't do anything.
if self.len() < 2 { return false; }
// Step 1: Identify the longest, rightmost weakly decreasing part of the vector
let mut i = self.len() - 1;
while i > 0 && self[i-1] >= self[i] {
i -= 1;
}
// If that is the entire vector, this is the last-ordered permutation.
if i == 0 {
return false;
}
// Step 2: Find the rightmost element larger than the pivot (i-1)
let mut j = self.len() - 1;
while j >= i && self[j] <= self[i-1] {
j -= 1;
}
// Step 3: Swap that element with the pivot
self.swap(j, i-1);
// Step 4: Reverse the (previously) weakly decreasing part
self[i..].reverse();
true
}
#[unstable(feature = "core")]
fn prev_permutation(&mut self) -> bool where T: Ord {
// These cases only have 1 permutation each, so we can't do anything.
if self.len() < 2 { return false; }
// Step 1: Identify the longest, rightmost weakly increasing part of the vector
let mut i = self.len() - 1;
while i > 0 && self[i-1] <= self[i] {
i -= 1;
}
// If that is the entire vector, this is the first-ordered permutation.
if i == 0 {
return false;
}
// Step 2: Reverse the weakly increasing part
self[i..].reverse();
// Step 3: Find the rightmost element equal to or bigger than the pivot (i-1)
let mut j = self.len() - 1;
while j >= i && self[j-1] < self[i-1] {
j -= 1;
}
// Step 4: Swap that element with the pivot
self.swap(i-1, j);
true
}
#[inline]
fn clone_from_slice(&mut self, src: &[T]) -> usize where T: Clone {
let min = cmp::min(self.len(), src.len());
let dst = &mut self[.. min];
let src = &src[.. min];
for i in 0..min {
dst[i].clone_from(&src[i]);
}
min
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ops::Index<usize> for [T] {
type Output = T;
fn index(&self, &index: &usize) -> &T {
assert!(index < self.len());
unsafe { mem::transmute(self.repr().data.offset(index as isize)) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ops::IndexMut<usize> for [T] {
fn index_mut(&mut self, &index: &usize) -> &mut T {
assert!(index < self.len());
unsafe { mem::transmute(self.repr().data.offset(index as isize)) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ops::Index<ops::Range<usize>> for [T] {
type Output = [T];
#[inline]
fn index(&self, index: &ops::Range<usize>) -> &[T] {
assert!(index.start <= index.end);
assert!(index.end <= self.len());
unsafe {
transmute(RawSlice {
data: self.as_ptr().offset(index.start as isize),
len: index.end - index.start
})
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ops::Index<ops::RangeTo<usize>> for [T] {
type Output = [T];
#[inline]
fn index(&self, index: &ops::RangeTo<usize>) -> &[T] {
self.index(&ops::Range{ start: 0, end: index.end })
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ops::Index<ops::RangeFrom<usize>> for [T] {
type Output = [T];
#[inline]
fn index(&self, index: &ops::RangeFrom<usize>) -> &[T] {
self.index(&ops::Range{ start: index.start, end: self.len() })
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ops::Index<RangeFull> for [T] {
type Output = [T];
#[inline]
fn index(&self, _index: &RangeFull) -> &[T] {
self
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ops::IndexMut<ops::Range<usize>> for [T] {
#[inline]
fn index_mut(&mut self, index: &ops::Range<usize>) -> &mut [T] {
assert!(index.start <= index.end);
assert!(index.end <= self.len());
unsafe {
transmute(RawSlice {
data: self.as_ptr().offset(index.start as isize),
len: index.end - index.start
})
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ops::IndexMut<ops::RangeTo<usize>> for [T] {
#[inline]
fn index_mut(&mut self, index: &ops::RangeTo<usize>) -> &mut [T] {
self.index_mut(&ops::Range{ start: 0, end: index.end })
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ops::IndexMut<ops::RangeFrom<usize>> for [T] {
#[inline]
fn index_mut(&mut self, index: &ops::RangeFrom<usize>) -> &mut [T] {
let len = self.len();
self.index_mut(&ops::Range{ start: index.start, end: len })
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ops::IndexMut<RangeFull> for [T] {
#[inline]
fn index_mut(&mut self, _index: &RangeFull) -> &mut [T] {
self
}
}
////////////////////////////////////////////////////////////////////////////////
// Common traits
////////////////////////////////////////////////////////////////////////////////
/// Data that is viewable as a slice.
#[unstable(feature = "core",
reason = "will be replaced by slice syntax")]
pub trait AsSlice<T> {
/// Work with `self` as a slice.
fn as_slice<'a>(&'a self) -> &'a [T];
}
#[unstable(feature = "core", reason = "trait is experimental")]
impl<T> AsSlice<T> for [T] {
#[inline(always)]
fn as_slice<'a>(&'a self) -> &'a [T] { self }
}
#[unstable(feature = "core", reason = "trait is experimental")]
impl<'a, T, U: ?Sized + AsSlice<T>> AsSlice<T> for &'a U {
#[inline(always)]
fn as_slice(&self) -> &[T] { AsSlice::as_slice(*self) }
}
#[unstable(feature = "core", reason = "trait is experimental")]
impl<'a, T, U: ?Sized + AsSlice<T>> AsSlice<T> for &'a mut U {
#[inline(always)]
fn as_slice(&self) -> &[T] { AsSlice::as_slice(*self) }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Default for &'a [T] {
#[stable(feature = "rust1", since = "1.0.0")]
fn default() -> &'a [T] { &[] }
}
//
// Iterators
//
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a [T] {
type Item = &'a T;
type IntoIter = Iter<'a, T>;
fn into_iter(self) -> Iter<'a, T> {
self.iter()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a mut [T] {
type Item = &'a mut T;
type IntoIter = IterMut<'a, T>;
fn into_iter(self) -> IterMut<'a, T> {
self.iter_mut()
}
}
// The shared definition of the `Iter` and `IterMut` iterators
macro_rules! iterator {
(struct $name:ident -> $ptr:ty, $elem:ty) => {
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for $name<'a, T> {
type Item = $elem;
#[inline]
fn next(&mut self) -> Option<$elem> {
// could be implemented with slices, but this avoids bounds checks
unsafe {
::intrinsics::assume(!self.ptr.is_null());
::intrinsics::assume(!self.end.is_null());
if self.ptr == self.end {
None
} else {
if mem::size_of::<T>() == 0 {
// purposefully don't use 'ptr.offset' because for
// vectors with 0-size elements this would return the
// same pointer.
self.ptr = transmute(self.ptr as usize + 1);
// Use a non-null pointer value
Some(&mut *(1 as *mut _))
} else {
let old = self.ptr;
self.ptr = self.ptr.offset(1);
Some(transmute(old))
}
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let diff = (self.end as usize) - (self.ptr as usize);
let size = mem::size_of::<T>();
let exact = diff / (if size == 0 {1} else {size});
(exact, Some(exact))
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for $name<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<$elem> {
// could be implemented with slices, but this avoids bounds checks
unsafe {
::intrinsics::assume(!self.ptr.is_null());
::intrinsics::assume(!self.end.is_null());
if self.end == self.ptr {
None
} else {
if mem::size_of::<T>() == 0 {
// See above for why 'ptr.offset' isn't used
self.end = transmute(self.end as usize - 1);
// Use a non-null pointer value
Some(&mut *(1 as *mut _))
} else {
self.end = self.end.offset(-1);
Some(transmute(self.end))
}
}
}
}
}
}
}
macro_rules! make_slice {
($t: ty => $result: ty: $start: expr, $end: expr) => {{
let diff = $end as usize - $start as usize;
let len = if mem::size_of::<T>() == 0 {
diff
} else {
diff / mem::size_of::<$t>()
};
unsafe {
transmute::<_, $result>(RawSlice { data: $start, len: len })
}
}}
}
/// Immutable slice iterator
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
ptr: *const T,
end: *const T,
_marker: marker::PhantomData<&'a T>,
}
unsafe impl<'a, T: Sync> Sync for Iter<'a, T> {}
unsafe impl<'a, T: Sync> Send for Iter<'a, T> {}
#[unstable(feature = "core")]
impl<'a, T> ops::Index<ops::Range<usize>> for Iter<'a, T> {
type Output = [T];
#[inline]
fn index(&self, index: &ops::Range<usize>) -> &[T] {
self.as_slice().index(index)
}
}
#[unstable(feature = "core")]
impl<'a, T> ops::Index<ops::RangeTo<usize>> for Iter<'a, T> {
type Output = [T];
#[inline]
fn index(&self, index: &ops::RangeTo<usize>) -> &[T] {
self.as_slice().index(index)
}
}
#[unstable(feature = "core")]
impl<'a, T> ops::Index<ops::RangeFrom<usize>> for Iter<'a, T> {
type Output = [T];
#[inline]
fn index(&self, index: &ops::RangeFrom<usize>) -> &[T] {
self.as_slice().index(index)
}
}
#[unstable(feature = "core")]
impl<'a, T> ops::Index<RangeFull> for Iter<'a, T> {
type Output = [T];
#[inline]
fn index(&self, _index: &RangeFull) -> &[T] {
self.as_slice()
}
}
impl<'a, T> Iter<'a, T> {
/// View the underlying data as a subslice of the original data.
///
/// This has the same lifetime as the original slice, and so the
/// iterator can continue to be used while this exists.
#[unstable(feature = "core")]
pub fn as_slice(&self) -> &'a [T] {
make_slice!(T => &'a [T]: self.ptr, self.end)
}
}
iterator!{struct Iter -> *const T, &'a T}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Iter<'a, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Clone for Iter<'a, T> {
fn clone(&self) -> Iter<'a, T> { Iter { ptr: self.ptr, end: self.end, _marker: self._marker } }
}
#[unstable(feature = "core", reason = "trait is experimental")]
impl<'a, T> RandomAccessIterator for Iter<'a, T> {
#[inline]
fn indexable(&self) -> usize {
let (exact, _) = self.size_hint();
exact
}
#[inline]
fn idx(&mut self, index: usize) -> Option<&'a T> {
unsafe {
if index < self.indexable() {
if mem::size_of::<T>() == 0 {
// Use a non-null pointer value
Some(&mut *(1 as *mut _))
} else {
Some(transmute(self.ptr.offset(index as isize)))
}
} else {
None
}
}
}
}
/// Mutable slice iterator.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
ptr: *mut T,
end: *mut T,
_marker: marker::PhantomData<&'a mut T>,
}
unsafe impl<'a, T: Sync> Sync for IterMut<'a, T> {}
unsafe impl<'a, T: Send> Send for IterMut<'a, T> {}
#[unstable(feature = "core")]
impl<'a, T> ops::Index<ops::Range<usize>> for IterMut<'a, T> {
type Output = [T];
#[inline]
fn index(&self, index: &ops::Range<usize>) -> &[T] {
self.index(&RangeFull).index(index)
}
}
#[unstable(feature = "core")]
impl<'a, T> ops::Index<ops::RangeTo<usize>> for IterMut<'a, T> {
type Output = [T];
#[inline]
fn index(&self, index: &ops::RangeTo<usize>) -> &[T] {
self.index(&RangeFull).index(index)
}
}
#[unstable(feature = "core")]
impl<'a, T> ops::Index<ops::RangeFrom<usize>> for IterMut<'a, T> {
type Output = [T];
#[inline]
fn index(&self, index: &ops::RangeFrom<usize>) -> &[T] {
self.index(&RangeFull).index(index)
}
}
#[unstable(feature = "core")]
impl<'a, T> ops::Index<RangeFull> for IterMut<'a, T> {
type Output = [T];
#[inline]
fn index(&self, _index: &RangeFull) -> &[T] {
make_slice!(T => &[T]: self.ptr, self.end)
}
}
#[unstable(feature = "core")]
impl<'a, T> ops::IndexMut<ops::Range<usize>> for IterMut<'a, T> {
#[inline]
fn index_mut(&mut self, index: &ops::Range<usize>) -> &mut [T] {
self.index_mut(&RangeFull).index_mut(index)
}
}
#[unstable(feature = "core")]
impl<'a, T> ops::IndexMut<ops::RangeTo<usize>> for IterMut<'a, T> {
#[inline]
fn index_mut(&mut self, index: &ops::RangeTo<usize>) -> &mut [T] {
self.index_mut(&RangeFull).index_mut(index)
}
}
#[unstable(feature = "core")]
impl<'a, T> ops::IndexMut<ops::RangeFrom<usize>> for IterMut<'a, T> {
#[inline]
fn index_mut(&mut self, index: &ops::RangeFrom<usize>) -> &mut [T] {
self.index_mut(&RangeFull).index_mut(index)
}
}
#[unstable(feature = "core")]
impl<'a, T> ops::IndexMut<RangeFull> for IterMut<'a, T> {
#[inline]
fn index_mut(&mut self, _index: &RangeFull) -> &mut [T] {
make_slice!(T => &mut [T]: self.ptr, self.end)
}
}
impl<'a, T> IterMut<'a, T> {
/// View the underlying data as a subslice of the original data.
///
/// To avoid creating `&mut` references that alias, this is forced
/// to consume the iterator. Consider using the `Slice` and
/// `SliceMut` implementations for obtaining slices with more
/// restricted lifetimes that do not consume the iterator.
#[unstable(feature = "core")]
pub fn into_slice(self) -> &'a mut [T] {
make_slice!(T => &'a mut [T]: self.ptr, self.end)
}
}
iterator!{struct IterMut -> *mut T, &'a mut T}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for IterMut<'a, T> {}
/// An internal abstraction over the splitting iterators, so that
/// splitn, splitn_mut etc can be implemented once.
trait SplitIter: DoubleEndedIterator {
/// Mark the underlying iterator as complete, extracting the remaining
/// portion of the slice.
fn finish(&mut self) -> Option<Self::Item>;
}
/// An iterator over subslices separated by elements that match a predicate
/// function.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Split<'a, T:'a, P> where P: FnMut(&T) -> bool {
v: &'a [T],
pred: P,
finished: bool
}
// FIXME(#19839) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> Clone for Split<'a, T, P> where P: Clone + FnMut(&T) -> bool {
fn clone(&self) -> Split<'a, T, P> {
Split {
v: self.v,
pred: self.pred.clone(),
finished: self.finished,
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> Iterator for Split<'a, T, P> where P: FnMut(&T) -> bool {
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
if self.finished { return None; }
match self.v.iter().position(|x| (self.pred)(x)) {
None => self.finish(),
Some(idx) => {
let ret = Some(&self.v[..idx]);
self.v = &self.v[idx + 1..];
ret
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.finished {
(0, Some(0))
} else {
(1, Some(self.v.len() + 1))
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> DoubleEndedIterator for Split<'a, T, P> where P: FnMut(&T) -> bool {
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
if self.finished { return None; }
match self.v.iter().rposition(|x| (self.pred)(x)) {
None => self.finish(),
Some(idx) => {
let ret = Some(&self.v[idx + 1..]);
self.v = &self.v[..idx];
ret
}
}
}
}
impl<'a, T, P> SplitIter for Split<'a, T, P> where P: FnMut(&T) -> bool {
#[inline]
fn finish(&mut self) -> Option<&'a [T]> {
if self.finished { None } else { self.finished = true; Some(self.v) }
}
}
/// An iterator over the subslices of the vector which are separated
/// by elements that match `pred`.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SplitMut<'a, T:'a, P> where P: FnMut(&T) -> bool {
v: &'a mut [T],
pred: P,
finished: bool
}
impl<'a, T, P> SplitIter for SplitMut<'a, T, P> where P: FnMut(&T) -> bool {
#[inline]
fn finish(&mut self) -> Option<&'a mut [T]> {
if self.finished {
None
} else {
self.finished = true;
Some(mem::replace(&mut self.v, &mut []))
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> Iterator for SplitMut<'a, T, P> where P: FnMut(&T) -> bool {
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
if self.finished { return None; }
let idx_opt = { // work around borrowck limitations
let pred = &mut self.pred;
self.v.iter().position(|x| (*pred)(x))
};
match idx_opt {
None => self.finish(),
Some(idx) => {
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(idx);
self.v = &mut tail[1..];
Some(head)
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.finished {
(0, Some(0))
} else {
// if the predicate doesn't match anything, we yield one slice
// if it matches every element, we yield len+1 empty slices.
(1, Some(self.v.len() + 1))
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T, P> DoubleEndedIterator for SplitMut<'a, T, P> where
P: FnMut(&T) -> bool,
{
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.finished { return None; }
let idx_opt = { // work around borrowck limitations
let pred = &mut self.pred;
self.v.iter().rposition(|x| (*pred)(x))
};
match idx_opt {
None => self.finish(),
Some(idx) => {
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(idx);
self.v = head;
Some(&mut tail[1..])
}
}
}
}
/// A private iterator over subslices separated by elements that
/// match a predicate function, splitting at most a fixed number of
/// times.
struct GenericSplitN<I> {
iter: I,
count: usize,
invert: bool
}
impl<T, I: SplitIter<Item=T>> Iterator for GenericSplitN<I> {
type Item = T;
#[inline]
fn next(&mut self) -> Option<T> {
if self.count == 0 {
self.iter.finish()
} else {
self.count -= 1;
if self.invert { self.iter.next_back() } else { self.iter.next() }
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let (lower, upper_opt) = self.iter.size_hint();
(lower, upper_opt.map(|upper| cmp::min(self.count + 1, upper)))
}
}
/// An iterator over subslices separated by elements that match a predicate
/// function, limited to a given number of splits.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SplitN<'a, T: 'a, P> where P: FnMut(&T) -> bool {
inner: GenericSplitN<Split<'a, T, P>>
}
/// An iterator over subslices separated by elements that match a
/// predicate function, limited to a given number of splits, starting
/// from the end of the slice.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RSplitN<'a, T: 'a, P> where P: FnMut(&T) -> bool {
inner: GenericSplitN<Split<'a, T, P>>
}
/// An iterator over subslices separated by elements that match a predicate
/// function, limited to a given number of splits.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SplitNMut<'a, T: 'a, P> where P: FnMut(&T) -> bool {
inner: GenericSplitN<SplitMut<'a, T, P>>
}
/// An iterator over subslices separated by elements that match a
/// predicate function, limited to a given number of splits, starting
/// from the end of the slice.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RSplitNMut<'a, T: 'a, P> where P: FnMut(&T) -> bool {
inner: GenericSplitN<SplitMut<'a, T, P>>
}
macro_rules! forward_iterator {
($name:ident: $elem:ident, $iter_of:ty) => {
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, $elem, P> Iterator for $name<'a, $elem, P> where
P: FnMut(&T) -> bool
{
type Item = $iter_of;
#[inline]
fn next(&mut self) -> Option<$iter_of> {
self.inner.next()
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
}
}
forward_iterator! { SplitN: T, &'a [T] }
forward_iterator! { RSplitN: T, &'a [T] }
forward_iterator! { SplitNMut: T, &'a mut [T] }
forward_iterator! { RSplitNMut: T, &'a mut [T] }
/// An iterator over overlapping subslices of length `size`.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Windows<'a, T:'a> {
v: &'a [T],
size: usize
}
// FIXME(#19839) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Clone for Windows<'a, T> {
fn clone(&self) -> Windows<'a, T> {
Windows {
v: self.v,
size: self.size,
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Windows<'a, T> {
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
if self.size > self.v.len() {
None
} else {
let ret = Some(&self.v[..self.size]);
self.v = &self.v[1..];
ret
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.size > self.v.len() {
(0, Some(0))
} else {
let size = self.v.len() - self.size + 1;
(size, Some(size))
}
}
}
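// Illustrative (values assumed): [1, 2, 3, 4].windows(2) yields the
// overlapping subslices [1, 2], [2, 3] and [3, 4]; a window size larger than
// the slice yields nothing.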
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Windows<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
if self.size > self.v.len() {
None
} else {
let ret = Some(&self.v[self.v.len()-self.size..]);
self.v = &self.v[..self.v.len()-1];
ret
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Windows<'a, T> {}
#[unstable(feature = "core", reason = "trait is experimental")]
impl<'a, T> RandomAccessIterator for Windows<'a, T> {
#[inline]
fn indexable(&self) -> usize {
self.size_hint().0
}
#[inline]
fn idx(&mut self, index: usize) -> Option<&'a [T]> {
if index + self.size > self.v.len() {
None
} else {
Some(&self.v[index .. index+self.size])
}
}
}
/// An iterator over a slice in (non-overlapping) chunks (`size` elements at a
/// time).
///
/// When the slice len is not evenly divided by the chunk size, the last slice
/// of the iteration will be the remainder.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Chunks<'a, T:'a> {
v: &'a [T],
size: usize
}
// FIXME(#19839) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Clone for Chunks<'a, T> {
fn clone(&self) -> Chunks<'a, T> {
Chunks {
v: self.v,
size: self.size,
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Chunks<'a, T> {
type Item = &'a [T];
#[inline]
fn next(&mut self) -> Option<&'a [T]> {
if self.v.len() == 0 {
None
} else {
let chunksz = cmp::min(self.v.len(), self.size);
let (fst, snd) = self.v.split_at(chunksz);
self.v = snd;
Some(fst)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.v.len() == 0 {
(0, Some(0))
} else {
let n = self.v.len() / self.size;
let rem = self.v.len() % self.size;
let n = if rem > 0 { n+1 } else { n };
(n, Some(n))
}
}
}
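// Illustrative (values assumed): [1, 2, 3, 4, 5].chunks(2) yields [1, 2],
// [3, 4] and then the remainder [5]; iterating from the back yields the
// remainder [5] first.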
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Chunks<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a [T]> {
if self.v.len() == 0 {
None
} else {
let remainder = self.v.len() % self.size;
let chunksz = if remainder != 0 { remainder } else { self.size };
let (fst, snd) = self.v.split_at(self.v.len() - chunksz);
self.v = fst;
Some(snd)
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Chunks<'a, T> {}
#[unstable(feature = "core", reason = "trait is experimental")]
impl<'a, T> RandomAccessIterator for Chunks<'a, T> {
#[inline]
fn indexable(&self) -> usize {
self.v.len()/self.size + if self.v.len() % self.size != 0 { 1 } else { 0 }
}
#[inline]
fn idx(&mut self, index: usize) -> Option<&'a [T]> {
if index < self.indexable() {
let lo = index * self.size;
let mut hi = lo + self.size;
if hi < lo || hi > self.v.len() { hi = self.v.len(); }
Some(&self.v[lo..hi])
} else {
None
}
}
}
/// An iterator over a slice in (non-overlapping) mutable chunks (`size`
/// elements at a time). When the slice len is not evenly divided by the chunk
/// size, the last slice of the iteration will be the remainder.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct ChunksMut<'a, T:'a> {
v: &'a mut [T],
chunk_size: usize
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for ChunksMut<'a, T> {
type Item = &'a mut [T];
#[inline]
fn next(&mut self) -> Option<&'a mut [T]> {
if self.v.len() == 0 {
None
} else {
let sz = cmp::min(self.v.len(), self.chunk_size);
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(sz);
self.v = tail;
Some(head)
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
if self.v.len() == 0 {
(0, Some(0))
} else {
let n = self.v.len() / self.chunk_size;
let rem = self.v.len() % self.chunk_size;
let n = if rem > 0 { n + 1 } else { n };
(n, Some(n))
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for ChunksMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.v.len() == 0 {
None
} else {
let remainder = self.v.len() % self.chunk_size;
let sz = if remainder != 0 { remainder } else { self.chunk_size };
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
let (head, tail) = tmp.split_at_mut(tmp_len - sz);
self.v = head;
Some(tail)
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for ChunksMut<'a, T> {}
//
// Free functions
//
/// Converts a reference to `A` into a slice of length 1 (without copying).
#[unstable(feature = "core")]
pub fn ref_slice<'a, A>(s: &'a A) -> &'a [A] {
unsafe {
transmute(RawSlice { data: s, len: 1 })
}
}
/// Converts a mutable reference to `A` into a mutable slice of length 1 (without copying).
#[unstable(feature = "core")]
pub fn mut_ref_slice<'a, A>(s: &'a mut A) -> &'a mut [A] {
unsafe {
let ptr: *const A = transmute(s);
transmute(RawSlice { data: ptr, len: 1 })
}
}
/// Forms a slice from a pointer and a length.
///
/// The `len` argument is the number of **elements**, not the number of bytes.
///
/// This function is unsafe as there is no guarantee that the given pointer is
/// valid for `len` elements, nor whether the lifetime inferred is a suitable
/// lifetime for the returned slice.
///
/// # Caveat
///
/// The lifetime for the returned slice is inferred from its usage. To
/// prevent accidental misuse, it's suggested to tie the lifetime to whichever
/// source lifetime is safe in the context, such as by providing a helper
/// function taking the lifetime of a host value for the slice, or by explicit
/// annotation.
///
/// # Examples
///
/// ```rust
/// use std::slice;
///
/// // manifest a slice out of thin air!
/// let ptr = 0x1234 as *const usize;
/// let amt = 10;
/// unsafe {
/// let slice = slice::from_raw_parts(ptr, amt);
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_raw_parts<'a, T>(p: *const T, len: usize) -> &'a [T] {
transmute(RawSlice { data: p, len: len })
}
/// Performs the same functionality as `from_raw_parts`, except that a mutable
/// slice is returned.
///
/// This function is unsafe for the same reasons as `from_raw_parts`, as well
/// as not being able to provide a non-aliasing guarantee of the returned
/// mutable slice.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_raw_parts_mut<'a, T>(p: *mut T, len: usize) -> &'a mut [T] {
transmute(RawSlice { data: p, len: len })
}
/// Forms a slice from a pointer and a length.
///
/// The pointer given is actually a reference to the base of the slice. This
/// reference is used to give a concrete lifetime to tie the returned slice to.
/// Typically this should indicate that the slice is valid for as long as the
/// pointer itself is valid.
///
/// The `len` argument is the number of **elements**, not the number of bytes.
///
/// This function is unsafe as there is no guarantee that the given pointer is
/// valid for `len` elements, nor whether the lifetime provided is a suitable
/// lifetime for the returned slice.
///
/// # Examples
///
/// ```rust
/// use std::slice;
///
/// // manifest a slice out of thin air!
/// let ptr = 0x1234 as *const usize;
/// let amt = 10;
/// unsafe {
/// let slice = slice::from_raw_buf(&ptr, amt);
/// }
/// ```
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0",
reason = "use from_raw_parts")]
pub unsafe fn from_raw_buf<'a, T>(p: &'a *const T, len: usize) -> &'a [T] {
transmute(RawSlice { data: *p, len: len })
}
/// Performs the same functionality as `from_raw_buf`, except that a mutable
/// slice is returned.
///
/// This function is unsafe for the same reasons as `from_raw_buf`, as well as
/// not being able to provide a non-aliasing guarantee of the returned mutable
/// slice.
#[inline]
#[unstable(feature = "core")]
#[deprecated(since = "1.0.0",
reason = "use from_raw_parts_mut")]
pub unsafe fn from_raw_mut_buf<'a, T>(p: &'a *mut T, len: usize) -> &'a mut [T] {
transmute(RawSlice { data: *p, len: len })
}
//
// Submodules
//
/// Operations on `[u8]`.
#[unstable(feature = "core", reason = "needs review")]
pub mod bytes {
use ptr;
use slice::SliceExt;
/// A trait for operations on mutable `[u8]`s.
pub trait MutableByteVector {
/// Sets all bytes of the receiver to the given value.
fn set_memory(&mut self, value: u8);
}
impl MutableByteVector for [u8] {
#[inline]
fn set_memory(&mut self, value: u8) {
unsafe { ptr::write_bytes(self.as_mut_ptr(), value, self.len()) };
}
}
    /// Copies data from `src` to `dst`.
///
/// Panics if the length of `dst` is less than the length of `src`.
#[inline]
pub fn copy_memory(dst: &mut [u8], src: &[u8]) {
let len_src = src.len();
assert!(dst.len() >= len_src);
// `dst` is unaliasable, so we know statically it doesn't overlap
// with `src`.
unsafe {
ptr::copy_nonoverlapping(dst.as_mut_ptr(),
src.as_ptr(),
len_src);
}
}
}
//
// Boilerplate traits
//
#[stable(feature = "rust1", since = "1.0.0")]
impl<A, B> PartialEq<[B]> for [A] where A: PartialEq<B> {
fn eq(&self, other: &[B]) -> bool {
self.len() == other.len() &&
order::eq(self.iter(), other.iter())
}
fn ne(&self, other: &[B]) -> bool {
self.len() != other.len() ||
order::ne(self.iter(), other.iter())
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Eq> Eq for [T] {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> Ord for [T] {
fn cmp(&self, other: &[T]) -> Ordering {
order::cmp(self.iter(), other.iter())
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd> PartialOrd for [T] {
#[inline]
fn partial_cmp(&self, other: &[T]) -> Option<Ordering> {
order::partial_cmp(self.iter(), other.iter())
}
#[inline]
fn lt(&self, other: &[T]) -> bool {
order::lt(self.iter(), other.iter())
}
#[inline]
fn le(&self, other: &[T]) -> bool {
order::le(self.iter(), other.iter())
}
#[inline]
fn ge(&self, other: &[T]) -> bool {
order::ge(self.iter(), other.iter())
}
#[inline]
fn gt(&self, other: &[T]) -> bool {
order::gt(self.iter(), other.iter())
}
}
/// Extension methods for slices containing integers.
#[unstable(feature = "core")]
pub trait IntSliceExt<U, S> {
/// Converts the slice to an immutable slice of unsigned integers with the same width.
fn as_unsigned<'a>(&'a self) -> &'a [U];
/// Converts the slice to an immutable slice of signed integers with the same width.
fn as_signed<'a>(&'a self) -> &'a [S];
/// Converts the slice to a mutable slice of unsigned integers with the same width.
fn as_unsigned_mut<'a>(&'a mut self) -> &'a mut [U];
/// Converts the slice to a mutable slice of signed integers with the same width.
fn as_signed_mut<'a>(&'a mut self) -> &'a mut [S];
}
macro_rules! impl_int_slice {
($u:ty, $s:ty, $t:ty) => {
#[unstable(feature = "core")]
impl IntSliceExt<$u, $s> for [$t] {
#[inline]
fn as_unsigned(&self) -> &[$u] { unsafe { transmute(self) } }
#[inline]
fn as_signed(&self) -> &[$s] { unsafe { transmute(self) } }
#[inline]
fn as_unsigned_mut(&mut self) -> &mut [$u] { unsafe { transmute(self) } }
#[inline]
fn as_signed_mut(&mut self) -> &mut [$s] { unsafe { transmute(self) } }
}
}
}
macro_rules! impl_int_slices {
($u:ty, $s:ty) => {
impl_int_slice! { $u, $s, $u }
impl_int_slice! { $u, $s, $s }
}
}
impl_int_slices! { u8, i8 }
impl_int_slices! { u16, i16 }
impl_int_slices! { u32, i32 }
impl_int_slices! { u64, i64 }
impl_int_slices! { usize, isize }<|fim▁end|>
| |
<|file_name|>git_helper.rs<|end_file_name|><|fim▁begin|>use git2::{Repository, Object, BranchType};
use semver::{Version, VersionReq};
use regex::Regex;
use inner::logger::Logger;
pub fn get_latest_commit(repo: &Repository) -> Option<String> {
match repo.head() {
Ok(r) => match r.resolve() {
Ok(ref r) => match r.target() {
Some(id) => Some(format!("{}", id)),
None => None,
},
_ => None,
},
_ => None,
}
}
pub fn get_current_branch(repo: &Repository) -> Option<String> {
match repo.branches(Some(BranchType::Local)) {
Ok(branches) => {
for b in branches {
let branch = match b {
Ok(b) => b.0,
_ => continue,
};
if branch.is_head() {
match branch.name() {
Ok(name) => match name {
Some(name_str) => return Some(name_str.to_owned()),
None => return None,
},
_ => return None,
}
}
}
None
},
_ => None,
}
}
pub fn get_latest_version(repo: &Repository, version_rule: Option<&VersionReq>) -> Option<(String, Version)> {
let mut version = None;
match repo.tag_names(None) {
Ok(tag_names) => {
let mut selected_tag = None;
let re = match Regex::new(r"^v?([0-9]+)[.]?([0-9]*)[.]?([0-9]*)([-]?.*)") {
Ok(re) => re,
_ => return version,
};
for t in tag_names.iter() {
let tag_name = match t {
Some(name) => name,
None => continue,
};
            let tag_version_str = match re.captures(tag_name) {
Some(caps) => format!("{}.{}.{}{}",
match caps.get(1) {
Some(c) => {
let n = c.as_str();
if n.is_empty() {
continue
} else {
n
}
},
_ => continue,
},
match caps.get(2) {
Some(c) => {
let n = c.as_str();
if n.is_empty() {
"0"
} else {
n
}
},
_ => "0",
},
match caps.get(3) {
Some(c) => {
let n = c.as_str();
if n.is_empty() {
"0"
} else {
n
}
},
_ => "0",
},
match caps.get(4) {
Some(c) => c.as_str(),
_ => "",
}),
None => continue,
};
let tag_version = match Version::parse(tag_version_str.as_str()) {
Ok(ver) => ver,
_ => continue,
};
if (version_rule.is_none() || version_rule.unwrap().matches(&tag_version)) && (selected_tag.is_none() || tag_version > selected_tag.clone().unwrap()) {
version = Some((tag_name.to_owned(), tag_version.clone()));
selected_tag = Some(tag_version);
}
}
},
_ => (),
}
version
}
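// Illustrative (tag names hypothetical): tags like "v1", "v1.2" and
// "v1.2.3-rc1" are normalized by the regex above to "1.0.0", "1.2.0" and
// "1.2.3-rc1" before the semver comparison picks the highest version that
// matches the optional rule.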
pub fn get_latest_compat_version(repo: &Repository, rule_tag_name: String) -> String {
match VersionReq::parse(rule_tag_name.as_str()) {
Ok(version_rule) => match get_latest_version(repo, Some(&version_rule)) {
Some((tag_name, _)) => tag_name,
None => rule_tag_name,
},
_ => rule_tag_name,
}
}
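// Illustrative (tags hypothetical): with v1.0.0, v1.4.2 and v2.0.0 present,
// get_latest_compat_version(&repo, "^1.0".to_owned()) resolves to "v1.4.2";
// if the rule string fails to parse as a semver requirement, it is returned
// unchanged.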
pub fn get_revision_object(repo: &Repository, pkg_import: String, version: String, should_retry: bool, logger: Logger) -> Option<(Object, String)> {
match repo.revparse_single(version.as_str()) {
Ok(obj) => return Some((obj, version)),
Err(e) => {
if !should_retry {
return None
}<|fim▁hole|> logger.error(format!("the version of `{}` changed to `{}` due to {}", pkg_import, ver, e));
return get_revision_object(repo, pkg_import, ver, false, logger)
},
None => return None,
}
},
}
}<|fim▁end|>
|
match get_latest_commit(repo) {
Some(ver) => {
|
<|file_name|>rest.py<|end_file_name|><|fim▁begin|># This file is part of kytos.
#
# Copyright (c) 2014 by ACK Labs
#
# Authors:
# Beraldo Leal <beraldo AT acklabs DOT io>
# Gabriel von. Winckler <winckler AT acklabs DOT io>
# Gustavo Luiz Duarte <gustavo AT acklabs DOT io>
#
# kytos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kytos is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kytos. If not, see <http://www.gnu.org/licenses/>.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- coding: utf-8 -*-
#!/usr/bin/env python
from kytos import models
from kytos.db import Session
from flask import Flask, redirect
from flask import url_for
from flask.ext.restful import abort, Resource, fields, reqparse, marshal_with, Api
from sqlalchemy.orm import subqueryload, scoped_session
from sqlalchemy.orm import exc
db_session = scoped_session(Session)
webserver = Flask(__name__)
api = Api(webserver)
@webserver.route('/')
def index():
return redirect("/static/index.html", code=302)
#Proposal /api/v1/
#topology:
#  - GET (list):
#      return list of nodes with attributes type (switch, nic, host),
#      name, resource_uri, connections (list of node indexes)
#      e.g.: [ {name: switch1, type: switch,
#               resource_uri: /api/v1/switch/1/, connections:[1]},
#              {name: host1, type: host,
#               resource_uri: /api/v1/host/1/, connections:[0]} ]
#switch:
#  - GET (list):
#      return list of all switches with basic attributes only: name, resource_uri.
#  - GET <ID> (show):
#      return all attributes (relations to {switch, nic, host, flow,
#      segment} by resource_uri, others expanded)
#  - PATCH <ID> (edit):
#      change posted attributes on the model. O2M and M2M replace all
#      existing values. return the GET <ID> equivalent.
#host:
#  - GET (list):
#      return list of all hosts with basic attributes only: name, resource_uri.
#  - GET <ID> (show):
#      return all attributes (relations to {switch, nic, host, flow,
#      segment} by resource_uri, others expanded)
#  - POST (create):
#      create a new object with posted attributes. return the GET <ID> equivalent.
#  - PATCH <ID> (edit):
#      change posted attributes on the model. O2M and M2M replace all
#      existing values. return the GET <ID> equivalent.
#  - DELETE <ID> (delete):
#      delete the object. no return value (except 200 response code)
#nic:
#  - GET (list):
#      return list of all nics with basic attributes only: name, resource_uri.
#  - GET <ID> (show):
#      return all attributes (relations to {switch, nic, host, flow,
#      segment} by resource_uri, others expanded)
#  - POST (create):
#      create a new object with posted attributes. return the GET <ID> equivalent.
#  - PATCH <ID> (edit):
#      change posted attributes on the model. O2M and M2M replace all
#      existing values. return the GET <ID> equivalent.
#  - DELETE <ID> (delete):
#      delete the object. no return value (except 200 response code)
#segment:
#  - GET (list):
#      return list of all network segments with basic attributes only:
#      name, resource_uri.
#  - GET <ID> (show):
#      return all attributes (relations to {switch, nic, host, flow,
#      segment} by resource_uri, others expanded)
#  - POST (create):
#      create a new object with posted attributes. return the GET <ID> equivalent.
#  - PATCH <ID> (edit):
#      change posted attributes on the model. O2M and M2M replace all
#      existing values. return the GET <ID> equivalent.
#  - DELETE <ID> (delete):
#      delete the object. no return value (except 200 response code)
#flow:
#  - GET (list):
#      return list of all flows with basic attributes only:
#      resource_uri. Will allow search in the future.
#  - GET <ID> (show):
#      return all attributes (relations to {switch, nic, host, flow,
#      segment} by resource_uri, others expanded)
#TODO: better JSON serialization (with datetime support)
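# Illustrative client session against the proposal above (host, IDs and field
# values are hypothetical):
#     $ curl http://localhost:5000/api/v1/switch/
#     [{"id": 1, "name": "switch1", "datapath_id": "00:00:00:00:00:00:00:01",
#       "resource_uri": "/api/v1/switch/1/"}]
#     $ curl http://localhost:5000/api/v1/switch/1/flows/
#     {"flows": [{"id": 7, "in_port": 2, "dl_src": "aa:bb:cc:dd:ee:ff", ...}]}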
# Marshal Templates
flow = {
'id': fields.Integer,
'created_at': fields.DateTime,
'is_active': fields.String,
'duration_sec': fields.Integer,
'in_port': fields.Integer,
'dl_src': fields.String,
'dl_dst': fields.String,
'dl_vlan': fields.Integer,
'dl_vlan_pcp': fields.Integer,
'dl_type': fields.Integer,
'nw_proto': fields.Integer,
'nw_src': fields.String,
'nw_dst': fields.String,
'nw_tos': fields.Integer,
'tp_src': fields.Integer,
'tp_dst': fields.Integer,
'packet_count': fields.Integer,
'byte_count': fields.Integer,
}
flows = {
'flows': fields.List(fields.Nested(flow)),
}
port_detail = {
'id': fields.Integer,
'port_number': fields.Integer,
'state': fields.String,
'speed': fields.Integer,
'hardware_address': fields.String,
}
switch_list = {
'id': fields.Integer,
'resource_uri': fields.Url('switch'),
'name': fields.String,
'datapath_id': fields.String
}
switch_detail = {
'id': fields.Integer,
'resource_uri': fields.Url('switch'),
'name': fields.String,
'datapath_id': fields.String,
'description': fields.String,
'manufacturer': fields.String,
'serial_number': fields.String,
'version': fields.String,
'address': fields.String,
'source_port': fields.String,
'capabilities': fields.String,
'last_seen': fields.DateTime,
'is_active': fields.String,
'ports': fields.List(fields.Nested(port_detail)),
'uplink': fields.List(fields.Nested(switch_list)),
'flows_count': fields.Integer
}
nic_list = {
'id': fields.Integer,
'resource_uri': fields.Url('nic'),
'name': fields.String,
}
nic_detail = {
'id': fields.Integer,
'resource_uri': fields.Url('nic'),<|fim▁hole|> 'last_seen': fields.DateTime,
'port': fields.Integer(attribute='port_id'),
'switch': fields.String(attribute='port.switch.name'),
'segment': fields.String(attribute='segment.name'),
}
host_list = {
'id': fields.Integer,
'resource_uri': fields.Url('host'),
'name': fields.String,
}
host_detail = {
'id': fields.Integer,
'resource_uri': fields.Url('host'),
'name': fields.String,
'description': fields.String,
'nics': fields.List(fields.Nested(nic_detail)),
}
class Topology(Resource):
# list
def get(self):
topology = { 'nodes': [], 'segments': [] }
segments = db_session.query(models.Segment).all()
for s in segments:
topology['segments'].append({'id':s.id, 'name':s.name})
nodes = topology['nodes']
switches = db_session.query(models.Switch).all()
switches_map = {}
for s in switches:
nodes.append({'name': s.name,
'resource_uri': url_for('switch', id=s.id),
'type': 'switch',
'segments': [],
'connections':[]})
switches_map[s.id] = len(nodes) - 1
hosts = db_session.query(models.Host).all()
hosts_map = {}
for h in hosts:
nodes.append({'name': h.name,
'resource_uri': url_for('host', id=h.id),
'type': 'host',
'segments': [nic.segment_id for nic in h.nics],
'connections':[]})
hosts_map[h.id] = len(nodes) - 1
for s in switches:
node = nodes[switches_map[s.id]]
# connect to other switches
for neighbour in s.get_neighbours():
node['connections'].append(switches_map[neighbour.id])
# connect to hosts
for nic in s.get_all_nics():
if nic.host:
# connect to the host
node['connections'].append(hosts_map[nic.host_id])
for h in hosts:
node = nodes[hosts_map[h.id]]
# connect to switch
for nic in h.nics:
if nic.port:
node['connections'].append(switches_map[nic.port.switch_id])
return topology
api.add_resource(Topology, '/api/v1/topology/')
class Switch(Resource):
# list
@marshal_with(switch_list)
def get(self):
return db_session.query(models.Switch).all()
class SwitchId(Resource):
# show
@marshal_with(switch_detail)
def get(self, id):
switch = db_session.query(models.Switch).get(id)
if not switch:
abort(404)
# populate uplink and ports
switch.uplink = switch.get_neighbours()
switch.ports = switch.ports.all()
# add flows count
switch.flows_count = switch.flows \
.filter(models.Flow.is_active == True).count()
return switch
# edit
def patch(self, id):
# TODO: save new data
return self.get(id)
api.add_resource(Switch, '/api/v1/switch/', endpoint='switches')
api.add_resource(SwitchId, '/api/v1/switch/<int:id>/', endpoint='switch')
class Flows(Resource):
@marshal_with(flows)
def get(self, id):
switch = db_session.query(models.Switch).get(id)
if not switch:
abort(404)
return {'flows': switch.flows.filter(models.Flow.is_active == True)}
api.add_resource(Flows, '/api/v1/switch/<int:id>/flows/', endpoint='flows')
class Host(Resource):
@marshal_with(host_list)
def get(self):
return db_session.query(models.Host).all()
def post(self):
pass
class HostId(Resource):
@marshal_with(host_detail)
def get(self, id):
host = db_session.query(models.Host).get(id)
if not host:
abort(404)
return host
def patch(self, id):
pass
def delete(self, id):
pass
api.add_resource(Host, '/api/v1/host/', endpoint='hosts')
api.add_resource(HostId, '/api/v1/host/<int:id>/', endpoint='host')
class NIC(Resource):
@marshal_with(nic_list)
def get(self):
return db_session.query(models.NIC).all()
def post(self):
pass
class NIC_Id(Resource):
@marshal_with(nic_detail)
def get(self, id):
nic = db_session.query(models.NIC).get(id)
if not nic:
abort(404)
return nic
def patch(self, id):
pass
def delete(self, id):
pass
api.add_resource(NIC, '/api/v1/nic/', endpoint='nics')
api.add_resource(NIC_Id, '/api/v1/nic/<int:id>/', endpoint='nic')
@webserver.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()<|fim▁end|>
|
'name': fields.String,
'description': fields.String,
'hardware_address': fields.String,
|
<|file_name|>interpolators.py<|end_file_name|><|fim▁begin|>"""
Classes for easy interpolation of trajectories and curves.
Requires Scipy to be installed.
"""
import numpy as np
class Interpolator:
""" Poorman's linear interpolator, doesn't require Scipy. """
def __init__(self, tt=None, ss=None, ttss=None, left=None, right=None):
if ttss is not None:
tt, ss = zip(*ttss)
self.tt = 1.0*np.array(tt)
self.ss = 1.0*np.array(ss)
self.left = left
self.right = right
self.tmin, self.tmax = min(tt), max(tt)
def __call__(self, t):
return np.interp(t, self.tt, self.ss, self.left, self.right)
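# A minimal usage sketch (illustrative values): piecewise-linear interpolation
# between the given samples, so the midpoint of (0, 0) and (1, 10) maps to 5.
#   interp = Interpolator(tt=[0.0, 1.0], ss=[0.0, 10.0])
#   interp(0.5)  # -> 5.0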
class Trajectory:
def __init__(self, tt, xx, yy):
self.tt = 1.0*np.array(tt)
self.xx = np.array(xx)
self.yy = np.array(yy)
self.update_interpolators()
def __call__(self, t):
return np.array([self.xi(t), self.yi(t)])
def addx(self, x):
return Trajectory(self.tt, self.xx+x, self.yy)
def addy(self, y):
return Trajectory(self.tt, self.xx, self.yy+y)
def update_interpolators(self):
self.xi = Interpolator(self.tt, self.xx)
self.yi = Interpolator(self.tt, self.yy)
def txy(self, tms=False):
return list(zip((1000 if tms else 1)*self.tt, self.xx, self.yy))
def to_file(self, filename):
np.savetxt(filename, np.array(self.txy(tms=True)),<|fim▁hole|> @staticmethod
def from_file(filename):
arr = np.loadtxt(filename, delimiter='\t')
tt, xx, yy = arr.T
return Trajectory(1.0*tt/1000, xx, yy)
@staticmethod
def save_list(trajs, filename):
N = len(trajs)
arr = np.hstack([np.array(t.txy(tms=True)) for t in trajs])
np.savetxt( filename, arr, fmt="%d", delimiter='\t',
header = "\t".join(N*['t (ms)', 'x', 'y']))
@staticmethod
def load_list(filename):
arr = np.loadtxt(filename, delimiter='\t').T
Nlines = arr.shape[0]
return [Trajectory(tt=1.0*a[0]/1000, xx=a[1], yy=a[2])
for a in np.split(arr, Nlines // 3)]<|fim▁end|>
|
fmt="%d", delimiter='\t')
|
<|file_name|>test_pipeline_server.rs<|end_file_name|><|fim▁begin|>#![allow(deprecated)]
extern crate futures;
extern crate tokio_core;
extern crate tokio_proto;
extern crate tokio_service;
extern crate rand;
#[macro_use]
extern crate log;
extern crate env_logger;
use std::cell::RefCell;
use std::io;
use std::sync::Mutex;
use std::thread;
use std::time::Duration;
use futures::stream;
use futures::sync::mpsc;
use futures::sync::oneshot;
use futures::future;
use futures::{Future, Stream, Sink};
use tokio_proto::streaming::pipeline::Frame;
use tokio_proto::streaming::{Message, Body};
mod support;
use support::service::simple_service;
use support::mock;
#[test]
fn test_immediate_done() {
let service = simple_service(|_| {
future::ok(Message::WithoutBody("goodbye"))
});
let (mut mock, _other) = mock::pipeline_server(service);
mock.allow_and_assert_drop();
}
#[test]
fn test_immediate_writable_echo() {
let service = simple_service(|req: Message<&'static str, Body<u32, io::Error>>| {
assert_eq!(req, "hello");
future::finished(Message::WithoutBody(*req.get_ref()))
});
let (mut mock, _other) = mock::pipeline_server(service);
mock.send(msg("hello"));
assert_eq!(mock.next_write().unwrap_msg(), "hello");
mock.allow_and_assert_drop();
}
#[test]
fn test_immediate_writable_delayed_response_echo() {
let (c, fut) = oneshot::channel();
let fut = Mutex::new(Some(fut));
let service = simple_service(move |req| {
assert_eq!(req, "hello");
fut.lock().unwrap().take().unwrap().then(|r| r.unwrap())
});
let (mut mock, _other) = mock::pipeline_server(service);
mock.send(msg("hello"));
thread::sleep(Duration::from_millis(20));
c.complete(Ok(Message::WithoutBody("goodbye")));
assert_eq!(mock.next_write().unwrap_msg(), "goodbye");
mock.allow_and_assert_drop();
}
#[test]
fn test_delayed_writable_immediate_response_echo() {
let service = simple_service(|req: Message<&'static str, Body<u32, io::Error>>| {
assert_eq!(req, "hello");
future::finished(Message::WithoutBody(*req.get_ref()))
});
let (mut mock, _other) = mock::pipeline_server(service);
mock.send(msg("hello"));
thread::sleep(Duration::from_millis(20));
assert_eq!(mock.next_write().unwrap_msg(), "hello");
}
#[test]
fn test_pipelining_while_service_is_processing() {
let (tx, rx) = mpsc::unbounded();
let tx = RefCell::new(tx);
let service = simple_service(move |_| {
let (c, fut) = oneshot::channel();
mpsc::UnboundedSender::send(&mut tx.borrow_mut(), c).unwrap();
fut.then(|r| r.unwrap())
});
let (mut mock, _other) = mock::pipeline_server(service);
let mut rx = rx.wait();
mock.send(msg("hello"));
let c1 = rx.next().unwrap().unwrap();
mock.send(msg("hello"));
let c2 = rx.next().unwrap().unwrap();
mock.send(msg("hello"));
let c3 = rx.next().unwrap().unwrap();
c3.complete(Ok(Message::WithoutBody("three")));
c2.complete(Ok(Message::WithoutBody("two")));
c1.complete(Ok(Message::WithoutBody("one")));
assert_eq!("one", mock.next_write().unwrap_msg());
assert_eq!("two", mock.next_write().unwrap_msg());
assert_eq!("three", mock.next_write().unwrap_msg());
}
#[test]
fn test_pipelining_while_transport_not_writable() {
let (tx, rx) = mpsc::unbounded();
let tx = RefCell::new(tx);
let service = simple_service(move |req: Message<&'static str, Body<u32, io::Error>>| {
mpsc::UnboundedSender::send(&mut tx.borrow_mut(), req.clone()).unwrap();
future::finished(Message::WithoutBody(*req.get_ref()))
});
let (mut mock, _other) = mock::pipeline_server(service);
mock.send(msg("one"));
mock.send(msg("two"));
mock.send(msg("three"));
// Assert the service received all the requests before they are written
// to the transport
let mut rx = rx.wait();
assert_eq!("one", rx.next().unwrap().unwrap());
assert_eq!("two", rx.next().unwrap().unwrap());
assert_eq!("three", rx.next().unwrap().unwrap());
assert_eq!("one", mock.next_write().unwrap_msg());
assert_eq!("two", mock.next_write().unwrap_msg());
assert_eq!("three", mock.next_write().unwrap_msg());
}
#[test]
fn test_repeatedly_flushes_messages() {
let service = simple_service(move |req: Message<&'static str, Body<u32, io::Error>>| {
future::ok(Message::WithoutBody(*req.get_ref()))
});
let (mut mock, _other) = mock::pipeline_server(service);
mock.send(msg("hello"));
assert_eq!("hello", mock.next_write().unwrap_msg());
mock.allow_and_assert_drop();
}
#[test]
fn test_returning_error_from_service() {
let service = simple_service(move |_| {
future::err(io::Error::new(io::ErrorKind::Other, "nope"))
});
let (mut mock, _other) = mock::pipeline_server(service);
mock.send(msg("hello"));
assert_eq!(io::ErrorKind::Other, mock.next_write().unwrap_err().kind());
mock.allow_and_assert_drop();
}
#[test]
fn test_reading_error_frame_from_transport() {
let service = simple_service(move |_| {
future::ok(Message::WithoutBody("omg no"))
});
let (mut mock, _other) = mock::pipeline_server(service);
mock.send(Frame::Error {
error: io::Error::new(io::ErrorKind::Other, "mock transport error frame"),
});
mock.allow_and_assert_drop();
}
#[test]
fn test_reading_io_error_from_transport() {
let service = simple_service(move |_| {
future::finished(Message::WithoutBody("omg no"))
});
let (mut mock, _other) = mock::pipeline_server(service);
mock.error(io::Error::new(io::ErrorKind::Other, "mock transport error frame"));
mock.allow_and_assert_drop();
}
#[test]
#[ignore]
fn test_reading_error_while_pipelining_from_transport() {
unimplemented!();
}
#[test]
#[ignore]
fn test_returning_would_block_from_service() {
// Because... it could happen
}
#[test]
fn test_streaming_request_body_then_responding() {
let (tx, rx) = mpsc::unbounded();
let service = simple_service(move |mut req: Message<&'static str, Body<u32, io::Error>>| {
assert_eq!(req, "omg");
let body = req.take_body().unwrap();
let mut tx = tx.clone();
body.for_each(move |chunk| {
mpsc::UnboundedSender::send(&mut tx, chunk).unwrap();
Ok(())
})
.and_then(|_| future::finished(Message::WithoutBody("hi2u")))
});
let (mut mock, _other) = mock::pipeline_server(service);
mock.send(msg_with_body("omg"));
let mut rx = rx.wait();
for i in 0..5 {
mock.send(Frame::Body { chunk: Some(i) });
assert_eq!(i, rx.next().unwrap().unwrap());
}
// Send end-of-stream notification
mock.send(Frame::Body { chunk: None });
assert_eq!(mock.next_write().unwrap_msg(), "hi2u");
mock.allow_and_assert_drop();
}
#[test]
fn test_responding_then_streaming_request_body() {
let (tx, rx) = mpsc::unbounded();
let service = simple_service(move |mut req: Message<&'static str, Body<u32, io::Error>>| {
assert_eq!(req, "omg");
let body = req.take_body().unwrap();
let mut tx = tx.clone();
thread::spawn(|| {
body.for_each(move |chunk| {
mpsc::UnboundedSender::send(&mut tx, chunk).unwrap();
Ok(())
})
.wait()
.unwrap();
});
future::finished(Message::WithoutBody("hi2u"))
});
let (mut mock, _other) = mock::pipeline_server(service);
mock.send(msg_with_body("omg"));
assert_eq!(mock.next_write().unwrap_msg(), "hi2u");
let mut rx = rx.wait();
for i in 0..5 {
mock.send(Frame::Body { chunk: Some(i) });
assert_eq!(i, rx.next().unwrap().unwrap());
}
// Send end-of-stream notification
mock.send(Frame::Body { chunk: None });
mock.allow_and_assert_drop();
}
#[test]
fn test_pipeline_stream_response_body() {
let service = simple_service(move |_| {
let body = stream::once(Ok(1u32)).boxed();
future::finished(Message::WithBody("resp", body))
});
let (mut mock, _other) = mock::pipeline_server(service);
mock.send(msg("one"));
<|fim▁hole|> assert_eq!(mock.next_write().unwrap_body(), None);
mock.allow_and_assert_drop();
}
#[test]
fn test_pipeline_streaming_body_without_consuming() {
let (tx, rx) = mpsc::unbounded();
let service = simple_service(move |mut req: Message<&'static str, Body<u32, io::Error>>| {
let body = req.take_body().unwrap();
if req == "one" {
debug!("drop body");
future::finished(Message::WithoutBody("resp-one")).boxed()
} else {
let mut tx = tx.clone();
body.for_each(move |chunk| {
mpsc::UnboundedSender::send(&mut tx, chunk).unwrap();
Ok(())
})
.and_then(|_| future::finished(Message::WithoutBody("resp-two")))
.boxed()
}
});
let (mut mock, _other) = mock::pipeline_server(service);
mock.send(msg_with_body("one"));
for i in 0..5 {
mock.send(Frame::Body { chunk: Some(i) });
thread::sleep(Duration::from_millis(20));
}
assert_eq!(mock.next_write().unwrap_msg(), "resp-one");
// Send the next request
mock.send(msg_with_body("two"));
let mut rx = rx.wait();
for i in 0..5 {
mock.send(Frame::Body { chunk: Some(i) });
assert_eq!(i, rx.next().unwrap().unwrap());
}
mock.send(Frame::Body { chunk: None });
assert_eq!(mock.next_write().unwrap_msg(), "resp-two");
mock.allow_and_assert_drop();
}
#[test]
#[ignore]
fn test_transport_error_during_body_stream() {
}
#[test]
fn test_streaming_response_body() {
let (tx, rx) = mpsc::channel::<io::Result<u32>>(0);
let rx = RefCell::new(Some(rx));
let service = simple_service(move |req| {
assert_eq!(req, "omg");
let rx = rx.borrow_mut().take().unwrap();
let rx = rx.then(|r| r.unwrap()).boxed();
future::finished(Message::WithBody("hi2u", rx))
});
let (mut mock, _other) = mock::pipeline_server(service);
mock.send(msg("omg"));
assert_eq!(mock.next_write().unwrap_msg(), "hi2u");
let tx = tx.send(Ok(1)).wait().unwrap();
assert_eq!(Some(1), mock.next_write().unwrap_body());
let _ = tx.send(Ok(2)).wait().unwrap();
assert_eq!(Some(2), mock.next_write().unwrap_body());
assert_eq!(None, mock.next_write().unwrap_body());
mock.allow_and_assert_drop();
}
fn msg(msg: &'static str) -> Frame<&'static str, u32, io::Error> {
Frame::Message { message: msg, body: false }
}
fn msg_with_body(msg: &'static str) -> Frame<&'static str, u32, io::Error> {
Frame::Message { message: msg, body: true }
}<|fim▁end|>
|
assert_eq!(mock.next_write().unwrap_msg(), "resp");
assert_eq!(mock.next_write().unwrap_body(), Some(1));
|
<|file_name|>test.hpp<|end_file_name|><|fim▁begin|>// test
#ifndef TEST_HPP
#define TEST_HPP
<|fim▁hole|>#include <iostream>
#include <cmath>
#include <cassert>
#include "ga.hpp"
#include "ioga.hpp"
#include "ray.hpp"
#include "elem.hpp"
#include "rt.hpp"
#include "grid.hpp"
using namespace std;
#endif<|fim▁end|>
| |
<|file_name|>entry.rs<|end_file_name|><|fim▁begin|>//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015-2020 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use crate::types::*;
use libimagstore::store::Entry;
use toml_query::read::TomlValueReadExt;
use toml_query::insert::TomlValueInsertExt;
use toml_query::delete::TomlValueDeleteExt;
use anyhow::Context;
use anyhow::Result;
use anyhow::Error;
pub trait GPSEntry {
fn set_coordinates(&mut self, c: Coordinates) -> Result<()>;
fn get_coordinates(&self) -> Result<Option<Coordinates>>;
/// Remove the coordinates from the entry
///
/// # Returns
///
/// The return type is a bit complicated, but that has a reason:
///
/// The outer Result<_> is used for notifying a failure during the header read/write action.
/// If the Option<_> is Some(_), the value was deleted.
/// The inner Result<_> is used for parsing failures during the parsing of the deleted value.
///
/// So:
///
/// * Ok(Some(Ok(_))) if the coordinates were deleted, returning the deleted value
/// * Ok(Some(Err(_))) if the coordinates were deleted, but the deleted value couldn't be parsed
/// * Ok(None) if there were no coordinates to delete
/// * Err(e) if the deleting failed
///
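/// A hedged consumption sketch (names are illustrative):
///
/// ```ignore
/// match entry.remove_coordinates()? {
///     Some(Ok(_coords)) => println!("coordinates removed"),
///     Some(Err(e)) => eprintln!("removed, but the old value was unparsable: {}", e),
///     None => println!("no coordinates were present"),
/// }
/// ```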
fn remove_coordinates(&mut self) -> Result<Option<Result<Coordinates>>>;
}
impl GPSEntry for Entry {
fn set_coordinates(&mut self, c: Coordinates) -> Result<()> {
self.get_header_mut()
.insert("gps.coordinates", c.into())
.map(|_| ())
.context(anyhow!("Error while inserting header 'gps.coordinates' in '{}'", self.get_location()))
.map_err(Error::from)
}
fn get_coordinates(&self) -> Result<Option<Coordinates>> {
match self
.get_header()
.read("gps.coordinates")
.context(anyhow!("Error while reading header 'gps.coordinates' in '{}'", self.get_location()))?
{
Some(hdr) => Coordinates::from_value(hdr).map(Some),
None => Ok(None),
}
}
fn remove_coordinates(&mut self) -> Result<Option<Result<Coordinates>>> {
let coordinates = self.get_coordinates();
let patterns = [
"gps.coordinates.latitude.degree",
"gps.coordinates.latitude.minutes",
"gps.coordinates.latitude.seconds",
"gps.coordinates.longitude.degree",
"gps.coordinates.longitude.minutes",
"gps.coordinates.longitude.seconds",
"gps.coordinates.latitude",
"gps.coordinates.longitude",
"gps.coordinates",<|fim▁hole|> for pattern in patterns.iter() {
let _ = hdr.delete(pattern)
.context(anyhow!("Error while deleting header '{}'", pattern))
.context("Error writing header")?;
}
match coordinates {
Ok(None) => Ok(None),
Ok(Some(some)) => Ok(Some(Ok(some))),
Err(e) => Ok(Some(Err(e))),
}
}
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use libimagstore::store::Store;
use crate::entry::*;
fn setup_logging() {
let _ = ::env_logger::try_init;
}
fn get_store() -> Store {
Store::new_inmemory(PathBuf::from("/"), &None).unwrap()
}
#[test]
fn test_set_gps() {
setup_logging();
let store = get_store();
let mut entry = store.create(PathBuf::from("test_set_gps")).unwrap();
let coordinates = Coordinates {
latitude: GPSValue::new(0, 0, 0),
longitude: GPSValue::new(0, 0, 0),
};
let res = entry.set_coordinates(coordinates);
assert!(res.is_ok());
}
#[test]
fn test_setget_gps() {
setup_logging();
let store = get_store();
let mut entry = store.create(PathBuf::from("test_setget_gps")).unwrap();
let coordinates = Coordinates {
latitude: GPSValue::new(0, 0, 0),
longitude: GPSValue::new(0, 0, 0),
};
let res = entry.set_coordinates(coordinates);
assert!(res.is_ok());
let coordinates = entry.get_coordinates();
assert!(coordinates.is_ok());
let coordinates = coordinates.unwrap();
assert!(coordinates.is_some());
let coordinates = coordinates.unwrap();
assert_eq!(0, coordinates.longitude.degree);
assert_eq!(0, coordinates.longitude.minutes);
assert_eq!(0, coordinates.longitude.seconds);
assert_eq!(0, coordinates.latitude.degree);
assert_eq!(0, coordinates.latitude.minutes);
assert_eq!(0, coordinates.latitude.seconds);
}
}<|fim▁end|>
|
"gps",
];
let hdr = self.get_header_mut();
|
<|file_name|>elaborate_drops.rs<|end_file_name|><|fim▁begin|>use crate::MirPass;
use rustc_data_structures::fx::FxHashMap;
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::patch::MirPatch;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, TyCtxt};
use rustc_mir_dataflow::elaborate_drops::{elaborate_drop, DropFlagState, Unwind};
use rustc_mir_dataflow::elaborate_drops::{DropElaborator, DropFlagMode, DropStyle};
use rustc_mir_dataflow::impls::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex};
use rustc_mir_dataflow::on_lookup_result_bits;
use rustc_mir_dataflow::MoveDataParamEnv;
use rustc_mir_dataflow::{on_all_children_bits, on_all_drop_children_bits};
use rustc_mir_dataflow::{Analysis, ResultsCursor};
use rustc_span::Span;
use rustc_target::abi::VariantIdx;
use std::fmt;
pub struct ElaborateDrops;
impl<'tcx> MirPass<'tcx> for ElaborateDrops {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
debug!("elaborate_drops({:?} @ {:?})", body.source, body.span);
let def_id = body.source.def_id();
let param_env = tcx.param_env_reveal_all_normalized(def_id);
let move_data = match MoveData::gather_moves(body, tcx, param_env) {
Ok(move_data) => move_data,
Err((move_data, _)) => {
tcx.sess.delay_span_bug(
body.span,
"No `move_errors` should be allowed in MIR borrowck",
);
move_data
}
};
let elaborate_patch = {
let body = &*body;
let env = MoveDataParamEnv { move_data, param_env };
let dead_unwinds = find_dead_unwinds(tcx, body, &env);
let inits = MaybeInitializedPlaces::new(tcx, body, &env)
.into_engine(tcx, body)
.dead_unwinds(&dead_unwinds)
.pass_name("elaborate_drops")
.iterate_to_fixpoint()
.into_results_cursor(body);
let uninits = MaybeUninitializedPlaces::new(tcx, body, &env)
.mark_inactive_variants_as_uninit()
.into_engine(tcx, body)
.dead_unwinds(&dead_unwinds)
.pass_name("elaborate_drops")
.iterate_to_fixpoint()
.into_results_cursor(body);
ElaborateDropsCtxt {
tcx,
body,
env: &env,
init_data: InitializationData { inits, uninits },
drop_flags: Default::default(),
patch: MirPatch::new(body),
}
.elaborate()
};
elaborate_patch.apply(body);
}
}
/// Returns the set of basic blocks whose unwind edges are known
/// to not be reachable, because they are `drop` terminators
/// that can't drop anything.
fn find_dead_unwinds<'tcx>(
tcx: TyCtxt<'tcx>,
body: &Body<'tcx>,
env: &MoveDataParamEnv<'tcx>,
) -> BitSet<BasicBlock> {
debug!("find_dead_unwinds({:?})", body.span);
// We only need to do this pass once, because unwind edges can only
// reach cleanup blocks, which can't have unwind edges themselves.
let mut dead_unwinds = BitSet::new_empty(body.basic_blocks().len());
let mut flow_inits = MaybeInitializedPlaces::new(tcx, body, &env)
.into_engine(tcx, body)
.pass_name("find_dead_unwinds")
.iterate_to_fixpoint()
.into_results_cursor(body);
for (bb, bb_data) in body.basic_blocks().iter_enumerated() {
let place = match bb_data.terminator().kind {
TerminatorKind::Drop { ref place, unwind: Some(_), .. }
| TerminatorKind::DropAndReplace { ref place, unwind: Some(_), .. } => place,
_ => continue,
};
debug!("find_dead_unwinds @ {:?}: {:?}", bb, bb_data);
let path = match env.move_data.rev_lookup.find(place.as_ref()) {
LookupResult::Exact(e) => e,
LookupResult::Parent(..) => {
debug!("find_dead_unwinds: has parent; skipping");
continue;
}
};
flow_inits.seek_before_primary_effect(body.terminator_loc(bb));
debug!(
"find_dead_unwinds @ {:?}: path({:?})={:?}; init_data={:?}",
bb,
place,
path,
flow_inits.get()
);
let mut maybe_live = false;
on_all_drop_children_bits(tcx, body, &env, path, |child| {
maybe_live |= flow_inits.contains(child);
});
debug!("find_dead_unwinds @ {:?}: maybe_live={}", bb, maybe_live);
if !maybe_live {
dead_unwinds.insert(bb);
}
}
dead_unwinds
}
struct InitializationData<'mir, 'tcx> {
inits: ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'mir, 'tcx>>,
uninits: ResultsCursor<'mir, 'tcx, MaybeUninitializedPlaces<'mir, 'tcx>>,
}
impl InitializationData<'_, '_> {
fn seek_before(&mut self, loc: Location) {
self.inits.seek_before_primary_effect(loc);
self.uninits.seek_before_primary_effect(loc);
}
fn maybe_live_dead(&self, path: MovePathIndex) -> (bool, bool) {
(self.inits.contains(path), self.uninits.contains(path))
}
}
struct Elaborator<'a, 'b, 'tcx> {
ctxt: &'a mut ElaborateDropsCtxt<'b, 'tcx>,
}
impl<'a, 'b, 'tcx> fmt::Debug for Elaborator<'a, 'b, 'tcx> {
fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
Ok(())
}
}
impl<'a, 'b, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, 'b, 'tcx> {
type Path = MovePathIndex;
fn patch(&mut self) -> &mut MirPatch<'tcx> {
&mut self.ctxt.patch
}
fn body(&self) -> &'a Body<'tcx> {
self.ctxt.body
}
fn tcx(&self) -> TyCtxt<'tcx> {
self.ctxt.tcx
}
fn param_env(&self) -> ty::ParamEnv<'tcx> {
self.ctxt.param_env()
}
fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle {
let ((maybe_live, maybe_dead), multipart) = match mode {
DropFlagMode::Shallow => (self.ctxt.init_data.maybe_live_dead(path), false),
DropFlagMode::Deep => {
let mut some_live = false;
let mut some_dead = false;
let mut children_count = 0;
on_all_drop_children_bits(self.tcx(), self.body(), self.ctxt.env, path, |child| {
let (live, dead) = self.ctxt.init_data.maybe_live_dead(child);
debug!("elaborate_drop: state({:?}) = {:?}", child, (live, dead));
some_live |= live;
some_dead |= dead;
children_count += 1;
});
((some_live, some_dead), children_count != 1)
}
};
match (maybe_live, maybe_dead, multipart) {<|fim▁hole|> (true, true, true) => DropStyle::Open,
}
}
fn clear_drop_flag(&mut self, loc: Location, path: Self::Path, mode: DropFlagMode) {
match mode {
DropFlagMode::Shallow => {
self.ctxt.set_drop_flag(loc, path, DropFlagState::Absent);
}
DropFlagMode::Deep => {
on_all_children_bits(
self.tcx(),
self.body(),
self.ctxt.move_data(),
path,
|child| self.ctxt.set_drop_flag(loc, child, DropFlagState::Absent),
);
}
}
}
fn field_subpath(&self, path: Self::Path, field: Field) -> Option<Self::Path> {
rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| match e {
ProjectionElem::Field(idx, _) => idx == field,
_ => false,
})
}
fn array_subpath(&self, path: Self::Path, index: u64, size: u64) -> Option<Self::Path> {
rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| match e {
ProjectionElem::ConstantIndex { offset, min_length, from_end } => {
debug_assert!(size == min_length, "min_length should be exact for arrays");
assert!(!from_end, "from_end should not be used for array element ConstantIndex");
offset == index
}
_ => false,
})
}
fn deref_subpath(&self, path: Self::Path) -> Option<Self::Path> {
rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| {
e == ProjectionElem::Deref
})
}
fn downcast_subpath(&self, path: Self::Path, variant: VariantIdx) -> Option<Self::Path> {
rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| match e {
ProjectionElem::Downcast(_, idx) => idx == variant,
_ => false,
})
}
fn get_drop_flag(&mut self, path: Self::Path) -> Option<Operand<'tcx>> {
self.ctxt.drop_flag(path).map(Operand::Copy)
}
}
struct ElaborateDropsCtxt<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
env: &'a MoveDataParamEnv<'tcx>,
init_data: InitializationData<'a, 'tcx>,
drop_flags: FxHashMap<MovePathIndex, Local>,
patch: MirPatch<'tcx>,
}
impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
fn move_data(&self) -> &'b MoveData<'tcx> {
&self.env.move_data
}
fn param_env(&self) -> ty::ParamEnv<'tcx> {
self.env.param_env
}
fn create_drop_flag(&mut self, index: MovePathIndex, span: Span) {
let tcx = self.tcx;
let patch = &mut self.patch;
debug!("create_drop_flag({:?})", self.body.span);
self.drop_flags.entry(index).or_insert_with(|| patch.new_internal(tcx.types.bool, span));
}
fn drop_flag(&mut self, index: MovePathIndex) -> Option<Place<'tcx>> {
self.drop_flags.get(&index).map(|t| Place::from(*t))
}
/// create a patch that elaborates all drops in the input
/// MIR.
fn elaborate(mut self) -> MirPatch<'tcx> {
self.collect_drop_flags();
self.elaborate_drops();
self.drop_flags_on_init();
self.drop_flags_for_fn_rets();
self.drop_flags_for_args();
self.drop_flags_for_locs();
self.patch
}
fn collect_drop_flags(&mut self) {
for (bb, data) in self.body.basic_blocks().iter_enumerated() {
let terminator = data.terminator();
let place = match terminator.kind {
TerminatorKind::Drop { ref place, .. }
| TerminatorKind::DropAndReplace { ref place, .. } => place,
_ => continue,
};
self.init_data.seek_before(self.body.terminator_loc(bb));
let path = self.move_data().rev_lookup.find(place.as_ref());
debug!("collect_drop_flags: {:?}, place {:?} ({:?})", bb, place, path);
let path = match path {
LookupResult::Exact(e) => e,
LookupResult::Parent(None) => continue,
LookupResult::Parent(Some(parent)) => {
let (_maybe_live, maybe_dead) = self.init_data.maybe_live_dead(parent);
if maybe_dead {
span_bug!(
terminator.source_info.span,
"drop of untracked, uninitialized value {:?}, place {:?} ({:?})",
bb,
place,
path
);
}
continue;
}
};
on_all_drop_children_bits(self.tcx, self.body, self.env, path, |child| {
let (maybe_live, maybe_dead) = self.init_data.maybe_live_dead(child);
debug!(
"collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
child,
place,
path,
(maybe_live, maybe_dead)
);
if maybe_live && maybe_dead {
self.create_drop_flag(child, terminator.source_info.span)
}
});
}
}
fn elaborate_drops(&mut self) {
for (bb, data) in self.body.basic_blocks().iter_enumerated() {
let loc = Location { block: bb, statement_index: data.statements.len() };
let terminator = data.terminator();
let resume_block = self.patch.resume_block();
match terminator.kind {
TerminatorKind::Drop { place, target, unwind } => {
self.init_data.seek_before(loc);
match self.move_data().rev_lookup.find(place.as_ref()) {
LookupResult::Exact(path) => elaborate_drop(
&mut Elaborator { ctxt: self },
terminator.source_info,
place,
path,
target,
if data.is_cleanup {
Unwind::InCleanup
} else {
Unwind::To(Option::unwrap_or(unwind, resume_block))
},
bb,
),
LookupResult::Parent(..) => {
span_bug!(
terminator.source_info.span,
"drop of untracked value {:?}",
bb
);
}
}
}
TerminatorKind::DropAndReplace { place, ref value, target, unwind } => {
assert!(!data.is_cleanup);
self.elaborate_replace(loc, place, value, target, unwind);
}
_ => continue,
}
}
}
/// Elaborate a MIR `replace` terminator. This instruction
/// is not directly handled by codegen, and therefore
/// must be desugared.
///
/// The desugaring drops the location if needed, and then writes
/// the value (including setting the drop flag) over it in *both* arms.
///
/// The `replace` terminator can also be called on places that
/// are not tracked by elaboration (for example,
/// `replace x[i] <- tmp0`). The borrow checker requires that
/// these locations are initialized before the assignment,
/// so we just generate an unconditional drop.
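///
/// Illustrative desugaring sketch (hypothetical MIR, not exact syntax):
///
/// ```text
/// bb0: DropAndReplace(place <- value) -> [return: bb1, unwind: bb2]
/// // becomes, roughly:
/// bb0: Drop(place) -> [return: bb3, unwind: bb4]
/// bb3: place = value; goto -> bb1
/// bb4 (cleanup): place = value; goto -> bb2
/// ```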
fn elaborate_replace(
&mut self,
loc: Location,
place: Place<'tcx>,
value: &Operand<'tcx>,
target: BasicBlock,
unwind: Option<BasicBlock>,
) {
let bb = loc.block;
let data = &self.body[bb];
let terminator = data.terminator();
assert!(!data.is_cleanup, "DropAndReplace in unwind path not supported");
let assign = Statement {
kind: StatementKind::Assign(Box::new((place, Rvalue::Use(value.clone())))),
source_info: terminator.source_info,
};
let unwind = unwind.unwrap_or_else(|| self.patch.resume_block());
let unwind = self.patch.new_block(BasicBlockData {
statements: vec![assign.clone()],
terminator: Some(Terminator {
kind: TerminatorKind::Goto { target: unwind },
..*terminator
}),
is_cleanup: true,
});
let target = self.patch.new_block(BasicBlockData {
statements: vec![assign],
terminator: Some(Terminator { kind: TerminatorKind::Goto { target }, ..*terminator }),
is_cleanup: false,
});
match self.move_data().rev_lookup.find(place.as_ref()) {
LookupResult::Exact(path) => {
debug!("elaborate_drop_and_replace({:?}) - tracked {:?}", terminator, path);
self.init_data.seek_before(loc);
elaborate_drop(
&mut Elaborator { ctxt: self },
terminator.source_info,
place,
path,
target,
Unwind::To(unwind),
bb,
);
on_all_children_bits(self.tcx, self.body, self.move_data(), path, |child| {
self.set_drop_flag(
Location { block: target, statement_index: 0 },
child,
DropFlagState::Present,
);
self.set_drop_flag(
Location { block: unwind, statement_index: 0 },
child,
DropFlagState::Present,
);
});
}
LookupResult::Parent(parent) => {
// drop and replace behind a pointer/array/whatever. The location
// must be initialized.
debug!("elaborate_drop_and_replace({:?}) - untracked {:?}", terminator, parent);
self.patch.patch_terminator(
bb,
TerminatorKind::Drop { place, target, unwind: Some(unwind) },
);
}
}
}
fn constant_bool(&self, span: Span, val: bool) -> Rvalue<'tcx> {
Rvalue::Use(Operand::Constant(Box::new(Constant {
span,
user_ty: None,
literal: ty::Const::from_bool(self.tcx, val).into(),
})))
}
fn set_drop_flag(&mut self, loc: Location, path: MovePathIndex, val: DropFlagState) {
if let Some(&flag) = self.drop_flags.get(&path) {
let span = self.patch.source_info_for_location(self.body, loc).span;
let val = self.constant_bool(span, val.value());
self.patch.add_assign(loc, Place::from(flag), val);
}
}
fn drop_flags_on_init(&mut self) {
let loc = Location::START;
let span = self.patch.source_info_for_location(self.body, loc).span;
let false_ = self.constant_bool(span, false);
for flag in self.drop_flags.values() {
self.patch.add_assign(loc, Place::from(*flag), false_.clone());
}
}
fn drop_flags_for_fn_rets(&mut self) {
for (bb, data) in self.body.basic_blocks().iter_enumerated() {
if let TerminatorKind::Call {
destination: Some((ref place, tgt)),
cleanup: Some(_),
..
} = data.terminator().kind
{
assert!(!self.patch.is_patched(bb));
let loc = Location { block: tgt, statement_index: 0 };
let path = self.move_data().rev_lookup.find(place.as_ref());
on_lookup_result_bits(self.tcx, self.body, self.move_data(), path, |child| {
self.set_drop_flag(loc, child, DropFlagState::Present)
});
}
}
}
fn drop_flags_for_args(&mut self) {
let loc = Location::START;
rustc_mir_dataflow::drop_flag_effects_for_function_entry(
self.tcx,
self.body,
self.env,
|path, ds| {
self.set_drop_flag(loc, path, ds);
},
)
}
fn drop_flags_for_locs(&mut self) {
// We intentionally iterate only over the *old* basic blocks.
//
// Basic blocks created by drop elaboration update their
// drop flags by themselves, to avoid the drop flags being
// clobbered before they are read.
for (bb, data) in self.body.basic_blocks().iter_enumerated() {
debug!("drop_flags_for_locs({:?})", data);
for i in 0..(data.statements.len() + 1) {
debug!("drop_flag_for_locs: stmt {}", i);
let mut allow_initializations = true;
if i == data.statements.len() {
match data.terminator().kind {
TerminatorKind::Drop { .. } => {
// drop elaboration should handle that by itself
continue;
}
TerminatorKind::DropAndReplace { .. } => {
// this contains the move of the source and
// the initialization of the destination. We
// only want the former - the latter is handled
// by the elaboration code and must be done
// *after* the destination is dropped.
assert!(self.patch.is_patched(bb));
allow_initializations = false;
}
TerminatorKind::Resume => {
// It is possible for `Resume` to be patched
// (in particular it can be patched to be replaced with
// a Goto; see `MirPatch::new`).
}
_ => {
assert!(!self.patch.is_patched(bb));
}
}
}
let loc = Location { block: bb, statement_index: i };
rustc_mir_dataflow::drop_flag_effects_for_location(
self.tcx,
self.body,
self.env,
loc,
|path, ds| {
if ds == DropFlagState::Absent || allow_initializations {
self.set_drop_flag(loc, path, ds)
}
},
)
}
// There may be a critical edge after this call,
// so mark the return as initialized *before* the
// call.
if let TerminatorKind::Call {
destination: Some((ref place, _)), cleanup: None, ..
} = data.terminator().kind
{
assert!(!self.patch.is_patched(bb));
let loc = Location { block: bb, statement_index: data.statements.len() };
let path = self.move_data().rev_lookup.find(place.as_ref());
on_lookup_result_bits(self.tcx, self.body, self.move_data(), path, |child| {
self.set_drop_flag(loc, child, DropFlagState::Present)
});
}
}
}
}<|fim▁end|>
|
(false, _, _) => DropStyle::Dead,
(true, false, _) => DropStyle::Static,
(true, true, false) => DropStyle::Conditional,
|
<|file_name|>Subject.js<|end_file_name|><|fim▁begin|>namespace("JSTools.Event");
/// <class>
/// Provides an interface for attaching and detaching Observer objects. Any number of
/// Observer objects may observe a subject.
/// </class>
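/// <example>
/// A minimal usage sketch (hypothetical observer; it is assumed to implement
/// JSTools.Event.IObserver, i.e. to expose an Update(objEvent) method):
///   var subject = new JSTools.Event.Subject();
///   subject.Attach(myObserver);
///   subject.Notify({ type: "change" }); // invokes myObserver.Update({...})
///   subject.Detach(myObserver);
/// </example>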
JSTools.Event.Subject = function()
{
//------------------------------------------------------------------------
// Declarations
//------------------------------------------------------------------------
this.InitType(arguments, "JSTools.Event.Subject");
var _this = this;
var _observers = null;
//------------------------------------------------------------------------
// Constructor
//------------------------------------------------------------------------
/// <constructor><|fim▁hole|> /// Creates a new JSTools.Event.Subject instance.
/// </constructor>
function Init()
{
_this.Clear();
}
//------------------------------------------------------------------------
// Methods
//------------------------------------------------------------------------
/// <method>
/// Removes all registered observer object.
/// </method>
function Clear()
{
_observers = [ ];
}
this.Clear = Clear;
/// <method>
/// Attaches the given observer function to this subject.
/// </method>
/// <param name="objIObserver" type="JSTools.Event.IObserver">Observer to attach.</param>
/// <returns type="Integer">Returns the index, at which the observer object has been added.
/// Returns -1 if the given observer object is invalid and not added.</returns>
function Attach(objIObserver)
{
if (objIObserver
&& typeof(objIObserver) == 'object'
&& objIObserver.IsTypeOf(JSTools.Event.IObserver))
{
_observers.Add(objIObserver);
return _observers.length - 1;
}
return -1;
}
this.Attach = Attach;
/// <method>
/// Detaches the given observer object from this subject.
/// </method>
/// <param name="objIObserverToDetach" type="JSTools.Event.IObserver">Observer to detach.</param>
function Detach(objIObserverToDetach)
{
_observers.Remove(objIObserverToDetach);
}
this.Detach = Detach;
/// <method>
/// Detaches an observer at the given index from this subject.
/// </method>
/// <param name="intIndex" type="Integer">Index to detach.</param>
function DetachByIndex(intIndex)
{
_observers.RemoveAt(intIndex);
}
this.DetachByIndex = DetachByIndex;
/// <method>
/// Notifies the observer about an update.
/// </method>
/// <param name="objEvent" type="Object">An object instance, which represents the event argument.</param>
function Notify(objEvent)
{
for (var i = 0; i < _observers.length; ++i)
{
_observers[i].Update(objEvent);
}
}
this.Notify = Notify;
Init();
}<|fim▁end|>
| |
<|file_name|>vendor.ts<|end_file_name|><|fim▁begin|>// Angular 2
import '@angular/platform-browser';
import '@angular/platform-browser-dynamic';
import '@angular/core';
import '@angular/common';
import '@angular/http';
import '@angular/router';
import 'rxjs/add/observable/throw';
import 'rxjs/add/observable/of';
import 'rxjs/add/operator/catch';
import 'rxjs/add/operator/debounceTime';
import 'rxjs/add/operator/distinctUntilChanged';
import 'rxjs/add/operator/map';
import 'rxjs/add/operator/switchMap';
import 'rxjs/add/operator/filter';
// Materialize<|fim▁hole|>import 'materialize-css/bin/materialize.css';
import 'materialize-css/bin/materialize.js';<|fim▁end|>
| |
<|file_name|>MovieCollection.js<|end_file_name|><|fim▁begin|>import PropTypes from 'prop-types';
import React, { Component } from 'react';
import MonitorToggleButton from 'Components/MonitorToggleButton';<|fim▁hole|>class MovieCollection extends Component {
//
// Lifecycle
constructor(props, context) {
super(props, context);
this.state = {
hasPosterError: false,
isEditImportListModalOpen: false
};
}
onAddImportListPress = (monitored) => {
this.props.onMonitorTogglePress(monitored);
if (!this.props.collectionList) {
this.setState({ isEditImportListModalOpen: true });
}
}
onEditImportListModalClose = () => {
this.setState({ isEditImportListModalOpen: false });
}
render() {
const {
name,
collectionList,
isSaving
} = this.props;
const monitored = collectionList !== undefined && collectionList.enabled && collectionList.enableAuto;
const importListId = collectionList ? collectionList.id : 0;
return (
<div>
<MonitorToggleButton
className={styles.monitorToggleButton}
monitored={monitored}
isSaving={isSaving}
size={15}
onPress={this.onAddImportListPress}
/>
{name}
<EditImportListModalConnector
id={importListId}
isOpen={this.state.isEditImportListModalOpen}
onModalClose={this.onEditImportListModalClose}
onDeleteImportListPress={this.onDeleteImportListPress}
/>
</div>
);
}
}
MovieCollection.propTypes = {
tmdbId: PropTypes.number.isRequired,
name: PropTypes.string.isRequired,
collectionList: PropTypes.object,
isSaving: PropTypes.bool.isRequired,
onMonitorTogglePress: PropTypes.func.isRequired
};
export default MovieCollection;<|fim▁end|>
|
import EditImportListModalConnector from 'Settings/ImportLists/ImportLists/EditImportListModalConnector';
import styles from './MovieCollection.css';
|
<|file_name|>WebGLRenderGroup.js<|end_file_name|><|fim▁begin|>/**
* @author Mat Groves http://matgroves.com/ @Doormat23
*/
/**
* A WebGLBatch enables a group of sprites to be drawn using the same settings.
* If a group of sprites all have the same baseTexture and blendMode then they can be
* grouped into a batch. All the sprites in a batch can then be drawn in one go by the
* GPU which is hugely efficient. ALL sprites in the webGL renderer are added to a batch
* even if the batch only contains one sprite. Batching is handled automatically by the
* webGL renderer. A good tip is: the smaller the number of batches there are, the faster
* the webGL renderer will run.
*
* @class WebGLBatch
* @constructor
* @param gl {WebGLContext} An instance of the webGL context
*/
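// Illustrative (hypothetical) scene: sprites A and B sharing one baseTexture and
// blend mode collapse into a single WebGLBatch (one draw call), while a sprite C
// with a different texture starts a second batch, so the render loop below issues
// two draws instead of three.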
PIXI.WebGLRenderGroup = function(gl, transparent)
{
this.gl = gl;
this.root = null;
this.backgroundColor = null;
this.transparent = transparent == undefined ? true : transparent;
this.batchs = [];
this.toRemove = [];
// console.log(this.transparent)
this.filterManager = new PIXI.WebGLFilterManager(this.transparent);
}
// constructor
PIXI.WebGLRenderGroup.prototype.constructor = PIXI.WebGLRenderGroup;
/**
* Add a display object to the webgl renderer
*
* @method setRenderable
* @param displayObject {DisplayObject}
* @private
*/
PIXI.WebGLRenderGroup.prototype.setRenderable = function(displayObject)
{
// has this changed??
if(this.root)this.removeDisplayObjectAndChildren(this.root);
displayObject.worldVisible = displayObject.visible;
// soooooo //
// to check if any batchs exist already??
// TODO what if its already has an object? should remove it
this.root = displayObject;
this.addDisplayObjectAndChildren(displayObject);
}
/**
* Renders the stage to its webgl view
*
* @method render
* @param projection {Object}
*/
PIXI.WebGLRenderGroup.prototype.render = function(projection, buffer)
{
PIXI.WebGLRenderer.updateTextures();
var gl = this.gl;
gl.uniform2f(PIXI.defaultShader.projectionVector, projection.x, projection.y);
this.filterManager.begin(projection, buffer);
gl.blendFunc(gl.ONE, gl.ONE_MINUS_SRC_ALPHA);
// will render all the elements in the group
var renderable;
for (var i=0; i < this.batchs.length; i++)
{
renderable = this.batchs[i];
if(renderable instanceof PIXI.WebGLBatch)
{
this.batchs[i].render();
continue;
}
// render special
this.renderSpecial(renderable, projection);
}
}
/**
* Renders a specific displayObject
*
* @method renderSpecific
* @param displayObject {DisplayObject}
* @param projection {Object}
* @private
*/
PIXI.WebGLRenderGroup.prototype.renderSpecific = function(displayObject, projection, buffer)
{
PIXI.WebGLRenderer.updateTextures();
var gl = this.gl;
gl.uniform2f(PIXI.defaultShader.projectionVector, projection.x, projection.y);
this.filterManager.begin(projection, buffer);
// to do!
// render part of the scene...
var startIndex;
var startBatchIndex;
var endIndex;
var endBatchIndex;
/*
* LOOK FOR THE NEXT SPRITE
* This part looks for the closest next sprite that can go into a batch
* it keeps looking until it finds a sprite or gets to the end of the display
* scene graph
*/
var nextRenderable = displayObject.first;
while(nextRenderable._iNext)
{
if(nextRenderable.renderable && nextRenderable.__renderGroup)break;
nextRenderable = nextRenderable._iNext;
}
var startBatch = nextRenderable.batch;
//console.log(nextRenderable);
//console.log(renderable)
if(nextRenderable instanceof PIXI.Sprite)
{
startBatch = nextRenderable.batch;
var head = startBatch.head;
var next = head;
// ok now we have the batch.. need to find the start index!
if(head == nextRenderable)
{
startIndex = 0;
}
else
{
startIndex = 1;
<|fim▁hole|> {
startIndex++;
head = head.__next;
}
}
}
else
{
startBatch = nextRenderable;
}
// Get the LAST renderable object
var lastRenderable = displayObject.last;
while(lastRenderable._iPrev)
{
if(lastRenderable.renderable && lastRenderable.__renderGroup)break;
lastRenderable = lastRenderable._iNext;
}
if(lastRenderable instanceof PIXI.Sprite)
{
endBatch = lastRenderable.batch;
var head = endBatch.head;
if(head == lastRenderable)
{
endIndex = 0;
}
else
{
endIndex = 1;
while(head.__next != lastRenderable)
{
endIndex++;
head = head.__next;
}
}
}
else
{
endBatch = lastRenderable;
}
if(startBatch == endBatch)
{
if(startBatch instanceof PIXI.WebGLBatch)
{
startBatch.render(startIndex, endIndex+1);
}
else
{
this.renderSpecial(startBatch, projection);
}
return;
}
// now we have first and last!
startBatchIndex = this.batchs.indexOf(startBatch);
endBatchIndex = this.batchs.indexOf(endBatch);
// DO the first batch
if(startBatch instanceof PIXI.WebGLBatch)
{
startBatch.render(startIndex);
}
else
{
this.renderSpecial(startBatch, projection);
}
// DO the middle batchs..
for (var i=startBatchIndex+1; i < endBatchIndex; i++)
{
renderable = this.batchs[i];
if(renderable instanceof PIXI.WebGLBatch)
{
this.batchs[i].render();
}
else
{
this.renderSpecial(renderable, projection);
}
}
// DO the last batch..
if(endBatch instanceof PIXI.WebGLBatch)
{
endBatch.render(0, endIndex+1);
}
else
{
this.renderSpecial(endBatch, projection);
}
}
/**
* Renders a specific renderable
*
* @method renderSpecial
* @param renderable {DisplayObject}
* @param projection {Object}
* @private
*/
PIXI.WebGLRenderGroup.prototype.renderSpecial = function(renderable, projection)
{
var worldVisible = renderable.vcount === PIXI.visibleCount
if(renderable instanceof PIXI.TilingSprite)
{
if(worldVisible)this.renderTilingSprite(renderable, projection);
}
else if(renderable instanceof PIXI.Strip)
{
if(worldVisible)this.renderStrip(renderable, projection);
}
else if(renderable instanceof PIXI.CustomRenderable)
{
if(worldVisible) renderable.renderWebGL(this, projection);
}
else if(renderable instanceof PIXI.Graphics)
{
if(worldVisible && renderable.renderable) PIXI.WebGLGraphics.renderGraphics(renderable, projection);
}
else if(renderable instanceof PIXI.FilterBlock)
{
this.handleFilterBlock(renderable, projection);
}
}
var flip = false;
var maskStack = [];
var maskPosition = 0;
//var usedMaskStack = [];
PIXI.WebGLRenderGroup.prototype.handleFilterBlock = function(filterBlock, projection)
{
/*
* for now only masks are supported..
*/
var gl = PIXI.gl;
if(filterBlock.open)
{
if(filterBlock.data instanceof Array)
{
this.filterManager.pushFilter(filterBlock);
// ok so..
}
else
{
maskPosition++;
maskStack.push(filterBlock)
gl.enable(gl.STENCIL_TEST);
gl.colorMask(false, false, false, false);
gl.stencilFunc(gl.ALWAYS,1,1);
gl.stencilOp(gl.KEEP,gl.KEEP,gl.INCR);
PIXI.WebGLGraphics.renderGraphics(filterBlock.data, projection);
gl.colorMask(true, true, true, true);
gl.stencilFunc(gl.NOTEQUAL,0,maskStack.length);
gl.stencilOp(gl.KEEP,gl.KEEP,gl.KEEP);
}
}
else
{
if(filterBlock.data instanceof Array)
{
this.filterManager.popFilter();
}
else
{
var maskData = maskStack.pop();
if(maskData)
{
gl.colorMask(false, false, false, false);
gl.stencilFunc(gl.ALWAYS,1,1);
gl.stencilOp(gl.KEEP,gl.KEEP,gl.DECR);
PIXI.WebGLGraphics.renderGraphics(maskData.data, projection);
gl.colorMask(true, true, true, true);
gl.stencilFunc(gl.NOTEQUAL,0,maskStack.length);
gl.stencilOp(gl.KEEP,gl.KEEP,gl.KEEP);
};
gl.disable(gl.STENCIL_TEST);
}
}
}
/**
* Updates a webgl texture
*
* @method updateTexture
* @param displayObject {DisplayObject}
* @private
*/
PIXI.WebGLRenderGroup.prototype.updateTexture = function(displayObject)
{
// TODO: definitely can optimise this function..
this.removeObject(displayObject);
/*
* LOOK FOR THE PREVIOUS RENDERABLE
* This part looks for the closest previous sprite that can go into a batch
* It keeps going back until it finds a sprite or the stage
*/
var previousRenderable = displayObject.first;
while(previousRenderable != this.root)
{
previousRenderable = previousRenderable._iPrev;
if(previousRenderable.renderable && previousRenderable.__renderGroup)break;
}
/*
* LOOK FOR THE NEXT SPRITE
* This part looks for the closest next sprite that can go into a batch
* it keeps looking until it finds a sprite or gets to the end of the display
* scene graph
*/
var nextRenderable = displayObject.last;
while(nextRenderable._iNext)
{
nextRenderable = nextRenderable._iNext;
if(nextRenderable.renderable && nextRenderable.__renderGroup)break;
}
this.insertObject(displayObject, previousRenderable, nextRenderable);
}
/**
* Adds filter blocks
*
* @method addFilterBlocks
* @param start {FilterBlock}
* @param end {FilterBlock}
* @private
*/
PIXI.WebGLRenderGroup.prototype.addFilterBlocks = function(start, end)
{
start.__renderGroup = this;
end.__renderGroup = this;
/*
* LOOK FOR THE PREVIOUS RENDERABLE
* This part looks for the closest previous sprite that can go into a batch
* It keeps going back until it finds a sprite or the stage
*/
var previousRenderable = start;
while(previousRenderable != this.root.first)
{
previousRenderable = previousRenderable._iPrev;
if(previousRenderable.renderable && previousRenderable.__renderGroup)break;
}
this.insertAfter(start, previousRenderable);
/*
* LOOK FOR THE PREVIOUS RENDERABLE (before the end block)
* This part walks backwards from the end filter block until it finds a
* renderable that already belongs to this render group, or reaches the
* root of the display scene graph
*/
var previousRenderable2 = end;
while(previousRenderable2 != this.root.first)
{
previousRenderable2 = previousRenderable2._iPrev;
if(previousRenderable2.renderable && previousRenderable2.__renderGroup)break;
}
this.insertAfter(end, previousRenderable2);
}
/**
* Remove filter blocks
*
* @method removeFilterBlocks
* @param start {FilterBlock}
* @param end {FilterBlock}
* @private
*/
PIXI.WebGLRenderGroup.prototype.removeFilterBlocks = function(start, end)
{
this.removeObject(start);
this.removeObject(end);
}
/**
* Adds a display object and children to the webgl context
*
* @method addDisplayObjectAndChildren
* @param displayObject {DisplayObject}
* @private
*/
PIXI.WebGLRenderGroup.prototype.addDisplayObjectAndChildren = function(displayObject)
{
if(displayObject.__renderGroup)displayObject.__renderGroup.removeDisplayObjectAndChildren(displayObject);
/*
* LOOK FOR THE PREVIOUS RENDERABLE
* This part looks for the closest previous sprite that can go into a batch
* It keeps going back until it finds a sprite or the stage
*/
var previousRenderable = displayObject.first;
while(previousRenderable != this.root.first)
{
previousRenderable = previousRenderable._iPrev;
if(previousRenderable.renderable && previousRenderable.__renderGroup)break;
}
/*
* LOOK FOR THE NEXT SPRITE
* This part looks for the closest next sprite that can go into a batch
* it keeps looking until it finds a sprite or gets to the end of the display
* scene graph
*/
var nextRenderable = displayObject.last;
while(nextRenderable._iNext)
{
nextRenderable = nextRenderable._iNext;
if(nextRenderable.renderable && nextRenderable.__renderGroup)break;
}
// one the display object hits this. we can break the loop
var tempObject = displayObject.first;
var testObject = displayObject.last._iNext;
do
{
tempObject.__renderGroup = this;
if(tempObject.renderable)
{
this.insertObject(tempObject, previousRenderable, nextRenderable);
previousRenderable = tempObject;
}
tempObject = tempObject._iNext;
}
while(tempObject != testObject)
}
/**
* Removes a display object and children to the webgl context
*
* @method removeDisplayObjectAndChildren
* @param displayObject {DisplayObject}
* @private
*/
PIXI.WebGLRenderGroup.prototype.removeDisplayObjectAndChildren = function(displayObject)
{
if(displayObject.__renderGroup != this)return;
// var displayObject = displayObject.first;
var lastObject = displayObject.last;
do
{
displayObject.__renderGroup = null;
if(displayObject.renderable)this.removeObject(displayObject);
displayObject = displayObject._iNext;
}
while(displayObject)
}
/**
* Inserts a displayObject into the linked list
*
* @method insertObject
* @param displayObject {DisplayObject}
* @param previousObject {DisplayObject}
* @param nextObject {DisplayObject}
* @private
*/
PIXI.WebGLRenderGroup.prototype.insertObject = function(displayObject, previousObject, nextObject)
{
// while looping below THE OBJECT MAY NOT HAVE BEEN ADDED
var previousSprite = previousObject;
var nextSprite = nextObject;
/*
* so now we have the next renderable and the previous renderable
*
*/
if(displayObject instanceof PIXI.Sprite)
{
var previousBatch
var nextBatch
if(previousSprite instanceof PIXI.Sprite)
{
previousBatch = previousSprite.batch;
if(previousBatch)
{
if(previousBatch.texture == displayObject.texture.baseTexture && previousBatch.blendMode == displayObject.blendMode)
{
previousBatch.insertAfter(displayObject, previousSprite);
return;
}
}
}
else
{
// TODO reword!
previousBatch = previousSprite;
}
if(nextSprite)
{
if(nextSprite instanceof PIXI.Sprite)
{
nextBatch = nextSprite.batch;
//batch may not exist if item was added to the display list but not to the webGL
if(nextBatch)
{
if(nextBatch.texture == displayObject.texture.baseTexture && nextBatch.blendMode == displayObject.blendMode)
{
nextBatch.insertBefore(displayObject, nextSprite);
return;
}
else
{
if(nextBatch == previousBatch)
{
// THERE IS A SPLIT IN THIS BATCH! //
var splitBatch = previousBatch.split(nextSprite);
// COOL!
// add it back into the array
/*
* OOPS!
* seems the new sprite is in the middle of a batch
* lets split it..
*/
var batch = PIXI.WebGLRenderer.getBatch();
var index = this.batchs.indexOf( previousBatch );
batch.init(displayObject);
this.batchs.splice(index+1, 0, batch, splitBatch);
return;
}
}
}
}
else
{
// TODO re-word!
nextBatch = nextSprite;
}
}
/*
* looks like it does not belong to any batch!
* but is also not intersecting one..
* time to create a new one!
*/
var batch = PIXI.WebGLRenderer.getBatch();
batch.init(displayObject);
if(previousBatch) // if this is invalid it means
{
var index = this.batchs.indexOf( previousBatch );
this.batchs.splice(index+1, 0, batch);
}
else
{
this.batchs.push(batch);
}
return;
}
else if(displayObject instanceof PIXI.TilingSprite)
{
// add to a batch!!
this.initTilingSprite(displayObject);
// this.batchs.push(displayObject);
}
else if(displayObject instanceof PIXI.Strip)
{
// add to a batch!!
this.initStrip(displayObject);
// this.batchs.push(displayObject);
}
else if(displayObject)// instanceof PIXI.Graphics)
{
//displayObject.initWebGL(this);
// add to a batch!!
//this.initStrip(displayObject);
//this.batchs.push(displayObject);
}
this.insertAfter(displayObject, previousSprite);
// insert and SPLIT!
}
/**
* Inserts a displayObject into the linked list
*
* @method insertAfter
* @param item {DisplayObject}
* @param displayObject {DisplayObject} The object to insert
* @private
*/
PIXI.WebGLRenderGroup.prototype.insertAfter = function(item, displayObject)
{
if(displayObject instanceof PIXI.Sprite)
{
var previousBatch = displayObject.batch;
if(previousBatch)
{
// so this object is in a batch!
// is it not? need to split the batch
if(previousBatch.tail == displayObject)
{
// is it tail? insert in to batchs
var index = this.batchs.indexOf( previousBatch );
this.batchs.splice(index+1, 0, item);
}
else
{
// TODO MODIFY ADD / REMOVE CHILD TO ACCOUNT FOR FILTERS (also get prev and next) //
// THERE IS A SPLIT IN THIS BATCH! //
var splitBatch = previousBatch.split(displayObject.__next);
// COOL!
// add it back into the array
/*
* OOPS!
* seems the new sprite is in the middle of a batch
* lets split it..
*/
var index = this.batchs.indexOf( previousBatch );
this.batchs.splice(index+1, 0, item, splitBatch);
}
}
else
{
this.batchs.push(item);
}
}
else
{
var index = this.batchs.indexOf( displayObject );
this.batchs.splice(index+1, 0, item);
}
}
/**
* Removes a displayObject from the linked list
*
* @method removeObject
* @param displayObject {DisplayObject} The object to remove
* @private
*/
PIXI.WebGLRenderGroup.prototype.removeObject = function(displayObject)
{
// loop through children..
// display object //
// add a child from the render group..
// remove it and all its children!
//displayObject.cacheVisible = false;//displayObject.visible;
/*
* removing is a lot quicker..
*
*/
var batchToRemove;
if(displayObject instanceof PIXI.Sprite)
{
// should always have a batch!
var batch = displayObject.batch;
if(!batch)return; // this means the display list has been altered before rendering
batch.remove(displayObject);
if(batch.size==0)
{
batchToRemove = batch;
}
}
else
{
batchToRemove = displayObject;
}
/*
     * Looks like there is something that needs removing!
*/
if(batchToRemove)
{
var index = this.batchs.indexOf( batchToRemove );
if(index == -1)return;// this means it was added then removed before rendered
        // ok so.. check to see if the adjacent batchs should be joined.
// TODO may optimise?
if(index == 0 || index == this.batchs.length-1)
{
            // wha - eva! just get rid of the empty batch!
this.batchs.splice(index, 1);
if(batchToRemove instanceof PIXI.WebGLBatch)PIXI.WebGLRenderer.returnBatch(batchToRemove);
return;
}
if(this.batchs[index-1] instanceof PIXI.WebGLBatch && this.batchs[index+1] instanceof PIXI.WebGLBatch)
{
if(this.batchs[index-1].texture == this.batchs[index+1].texture && this.batchs[index-1].blendMode == this.batchs[index+1].blendMode)
{
//console.log("MERGE")
this.batchs[index-1].merge(this.batchs[index+1]);
if(batchToRemove instanceof PIXI.WebGLBatch)PIXI.WebGLRenderer.returnBatch(batchToRemove);
PIXI.WebGLRenderer.returnBatch(this.batchs[index+1]);
this.batchs.splice(index, 2);
return;
}
}
this.batchs.splice(index, 1);
if(batchToRemove instanceof PIXI.WebGLBatch)PIXI.WebGLRenderer.returnBatch(batchToRemove);
}
}
/**
* Initializes a tiling sprite
*
* @method initTilingSprite
* @param sprite {TilingSprite} The tiling sprite to initialize
* @private
*/
PIXI.WebGLRenderGroup.prototype.initTilingSprite = function(sprite)
{
var gl = this.gl;
    // make the texture tileable..
sprite.verticies = new Float32Array([0, 0,
sprite.width, 0,
sprite.width, sprite.height,
0, sprite.height]);
sprite.uvs = new Float32Array([0, 0,
1, 0,
1, 1,
0, 1]);
sprite.colors = new Float32Array([1,1,1,1]);
    sprite.indices = new Uint16Array([0, 1, 3, 2]);
sprite._vertexBuffer = gl.createBuffer();
sprite._indexBuffer = gl.createBuffer();
sprite._uvBuffer = gl.createBuffer();
sprite._colorBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, sprite._vertexBuffer);
gl.bufferData(gl.ARRAY_BUFFER, sprite.verticies, gl.STATIC_DRAW);
gl.bindBuffer(gl.ARRAY_BUFFER, sprite._uvBuffer);
gl.bufferData(gl.ARRAY_BUFFER, sprite.uvs, gl.DYNAMIC_DRAW);
gl.bindBuffer(gl.ARRAY_BUFFER, sprite._colorBuffer);
gl.bufferData(gl.ARRAY_BUFFER, sprite.colors, gl.STATIC_DRAW);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, sprite._indexBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, sprite.indices, gl.STATIC_DRAW);
// return ( (x > 0) && ((x & (x - 1)) == 0) );
if(sprite.texture.baseTexture._glTexture)
{
gl.bindTexture(gl.TEXTURE_2D, sprite.texture.baseTexture._glTexture);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.REPEAT);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.REPEAT);
sprite.texture.baseTexture._powerOf2 = true;
}
else
{
sprite.texture.baseTexture._powerOf2 = true;
}
}
/**
* Renders a Strip
*
* @method renderStrip
* @param strip {Strip} The strip to render
* @param projection {Object}
* @private
*/
PIXI.WebGLRenderGroup.prototype.renderStrip = function(strip, projection)
{
var gl = this.gl;
PIXI.activateStripShader();
var shader = PIXI.stripShader;
var program = shader.program;
var m = PIXI.mat3.clone(strip.worldTransform);
PIXI.mat3.transpose(m);
// console.log(projection)
// set the matrix transform for the
gl.uniformMatrix3fv(shader.translationMatrix, false, m);
gl.uniform2f(shader.projectionVector, projection.x, projection.y);
gl.uniform2f(shader.offsetVector, -PIXI.offset.x, -PIXI.offset.y);
gl.uniform1f(shader.alpha, strip.worldAlpha);
/*
if(strip.blendMode == PIXI.blendModes.NORMAL)
{
gl.blendFunc(gl.ONE, gl.ONE_MINUS_SRC_ALPHA);
}
else
{
gl.blendFunc(gl.ONE, gl.ONE_MINUS_SRC_COLOR);
}
*/
//console.log("!!")
if(!strip.dirty)
{
gl.bindBuffer(gl.ARRAY_BUFFER, strip._vertexBuffer);
        gl.bufferSubData(gl.ARRAY_BUFFER, 0, strip.verticies);
gl.vertexAttribPointer(shader.aVertexPosition, 2, gl.FLOAT, false, 0, 0);
// update the uvs
gl.bindBuffer(gl.ARRAY_BUFFER, strip._uvBuffer);
gl.vertexAttribPointer(shader.aTextureCoord, 2, gl.FLOAT, false, 0, 0);
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, strip.texture.baseTexture._glTexture);
gl.bindBuffer(gl.ARRAY_BUFFER, strip._colorBuffer);
gl.vertexAttribPointer(shader.colorAttribute, 1, gl.FLOAT, false, 0, 0);
// dont need to upload!
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, strip._indexBuffer);
}
else
{
strip.dirty = false;
gl.bindBuffer(gl.ARRAY_BUFFER, strip._vertexBuffer);
        gl.bufferData(gl.ARRAY_BUFFER, strip.verticies, gl.STATIC_DRAW);
gl.vertexAttribPointer(shader.aVertexPosition, 2, gl.FLOAT, false, 0, 0);
// update the uvs
gl.bindBuffer(gl.ARRAY_BUFFER, strip._uvBuffer);
        gl.bufferData(gl.ARRAY_BUFFER, strip.uvs, gl.STATIC_DRAW);
gl.vertexAttribPointer(shader.aTextureCoord, 2, gl.FLOAT, false, 0, 0);
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, strip.texture.baseTexture._glTexture);
// console.log(strip.texture.baseTexture._glTexture)
gl.bindBuffer(gl.ARRAY_BUFFER, strip._colorBuffer);
        gl.bufferData(gl.ARRAY_BUFFER, strip.colors, gl.STATIC_DRAW);
gl.vertexAttribPointer(shader.colorAttribute, 1, gl.FLOAT, false, 0, 0);
// dont need to upload!
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, strip._indexBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, strip.indices, gl.STATIC_DRAW);
}
gl.drawElements(gl.TRIANGLE_STRIP, strip.indices.length, gl.UNSIGNED_SHORT, 0);
PIXI.deactivateStripShader();
//gl.useProgram(PIXI.currentProgram);
}
/**
* Renders a TilingSprite
*
* @method renderTilingSprite
* @param sprite {TilingSprite} The tiling sprite to render
* @param projectionMatrix {Object}
* @private
*/
PIXI.WebGLRenderGroup.prototype.renderTilingSprite = function(sprite, projectionMatrix)
{
var gl = this.gl;
var shaderProgram = PIXI.shaderProgram;
var tilePosition = sprite.tilePosition;
var tileScale = sprite.tileScale;
var offsetX = tilePosition.x/sprite.texture.baseTexture.width;
var offsetY = tilePosition.y/sprite.texture.baseTexture.height;
var scaleX = (sprite.width / sprite.texture.baseTexture.width) / tileScale.x;
var scaleY = (sprite.height / sprite.texture.baseTexture.height) / tileScale.y;
sprite.uvs[0] = 0 - offsetX;
sprite.uvs[1] = 0 - offsetY;
sprite.uvs[2] = (1 * scaleX) -offsetX;
sprite.uvs[3] = 0 - offsetY;
sprite.uvs[4] = (1 *scaleX) - offsetX;
sprite.uvs[5] = (1 *scaleY) - offsetY;
sprite.uvs[6] = 0 - offsetX;
sprite.uvs[7] = (1 *scaleY) - offsetY;
gl.bindBuffer(gl.ARRAY_BUFFER, sprite._uvBuffer);
    gl.bufferSubData(gl.ARRAY_BUFFER, 0, sprite.uvs);
this.renderStrip(sprite, projectionMatrix);
}
/**
* Initializes a strip to be rendered
*
* @method initStrip
* @param strip {Strip} The strip to initialize
* @private
*/
PIXI.WebGLRenderGroup.prototype.initStrip = function(strip)
{
// build the strip!
var gl = this.gl;
var shaderProgram = this.shaderProgram;
strip._vertexBuffer = gl.createBuffer();
strip._indexBuffer = gl.createBuffer();
strip._uvBuffer = gl.createBuffer();
strip._colorBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, strip._vertexBuffer);
gl.bufferData(gl.ARRAY_BUFFER, strip.verticies, gl.DYNAMIC_DRAW);
gl.bindBuffer(gl.ARRAY_BUFFER, strip._uvBuffer);
gl.bufferData(gl.ARRAY_BUFFER, strip.uvs, gl.STATIC_DRAW);
gl.bindBuffer(gl.ARRAY_BUFFER, strip._colorBuffer);
gl.bufferData(gl.ARRAY_BUFFER, strip.colors, gl.STATIC_DRAW);
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, strip._indexBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, strip.indices, gl.STATIC_DRAW);
}<|fim▁end|>
|
while(head.__next != nextRenderable)
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Computed values.
use {Atom, Namespace};
use context::QuirksMode;
use euclid::Size2D;
use font_metrics::{FontMetricsProvider, get_metrics_provider_for_product};
use media_queries::Device;
#[cfg(feature = "gecko")]
use properties;
use properties::{ComputedValues, LonghandId, StyleBuilder};
use rule_cache::RuleCacheConditions;
#[cfg(feature = "servo")]
use servo_url::ServoUrl;
use std::{f32, fmt};
use std::cell::RefCell;
#[cfg(feature = "servo")]
use std::sync::Arc;
use style_traits::ToCss;
use style_traits::cursor::Cursor;
use super::{CSSFloat, CSSInteger};
use super::generics::{GreaterThanOrEqualToOne, NonNegative};
use super::generics::grid::{GridLine as GenericGridLine, TrackBreadth as GenericTrackBreadth};
use super::generics::grid::{TrackSize as GenericTrackSize, TrackList as GenericTrackList};
use super::generics::grid::GridTemplateComponent as GenericGridTemplateComponent;
use super::specified;
pub use app_units::Au;
pub use properties::animated_properties::TransitionProperty;
#[cfg(feature = "gecko")]
pub use self::align::{AlignItems, AlignJustifyContent, AlignJustifySelf, JustifyItems};
pub use self::angle::Angle;
pub use self::background::{BackgroundSize, BackgroundRepeat};
pub use self::border::{BorderImageSlice, BorderImageWidth, BorderImageSideWidth};
pub use self::border::{BorderRadius, BorderCornerRadius, BorderSpacing};
pub use self::font::{FontSize, FontSizeAdjust, FontSynthesis, FontWeight, FontVariantAlternates};
pub use self::font::{FontFamily, FontLanguageOverride, FontVariantSettings, FontVariantEastAsian};
pub use self::font::{FontVariantLigatures, FontVariantNumeric, FontFeatureSettings};
pub use self::font::{MozScriptLevel, MozScriptMinSize, MozScriptSizeMultiplier, XTextZoom, XLang};
pub use self::box_::{AnimationIterationCount, AnimationName, OverscrollBehavior, ScrollSnapType, VerticalAlign};
pub use self::color::{Color, ColorPropertyValue, RGBAColor};
pub use self::effects::{BoxShadow, Filter, SimpleShadow};
pub use self::flex::FlexBasis;
pub use self::image::{Gradient, GradientItem, Image, ImageLayer, LineDirection, MozImageRect};
#[cfg(feature = "gecko")]
pub use self::gecko::ScrollSnapPoint;
pub use self::rect::LengthOrNumberRect;
pub use super::{Auto, Either, None_};
pub use super::specified::{BorderStyle, TextDecorationLine};
pub use self::length::{CalcLengthOrPercentage, Length, LengthOrNone, LengthOrNumber, LengthOrPercentage};
pub use self::length::{LengthOrPercentageOrAuto, LengthOrPercentageOrNone, MaxLength, MozLength};
pub use self::length::{CSSPixelLength, NonNegativeLength, NonNegativeLengthOrPercentage};
pub use self::percentage::Percentage;
pub use self::position::{Position, GridAutoFlow};
pub use self::svg::{SVGLength, SVGOpacity, SVGPaint, SVGPaintKind, SVGStrokeDashArray, SVGWidth};
pub use self::table::XSpan;
pub use self::text::{InitialLetter, LetterSpacing, LineHeight, TextOverflow, WordSpacing};
pub use self::time::Time;
pub use self::transform::{TimingFunction, Transform, TransformOperation, TransformOrigin};
pub use self::ui::MozForceBrokenImageIcon;
#[cfg(feature = "gecko")]
pub mod align;
pub mod angle;
pub mod background;
pub mod basic_shape;
pub mod border;
#[path = "box.rs"]
pub mod box_;
pub mod color;
pub mod effects;
pub mod flex;
pub mod font;
pub mod image;
#[cfg(feature = "gecko")]
pub mod gecko;
pub mod length;
pub mod percentage;
pub mod position;
pub mod rect;
pub mod svg;
pub mod table;
pub mod text;
pub mod time;
pub mod transform;
pub mod ui;
/// A `Context` is all the data a specified value could ever need to compute
/// itself and be transformed to a computed value.
pub struct Context<'a> {
/// Whether the current element is the root element.
pub is_root_element: bool,
/// Values accessed through this need to be in the properties "computed
/// early": color, text-decoration, font-size, display, position, float,
/// border-*-style, outline-style, font-family, writing-mode...
pub builder: StyleBuilder<'a>,
/// A cached computed system font value, for use by gecko.
///
/// See properties/longhands/font.mako.rs
#[cfg(feature = "gecko")]
pub cached_system_font: Option<properties::longhands::system_font::ComputedSystemFont>,
/// A dummy option for servo so initializing a computed::Context isn't
/// painful.
///
/// TODO(emilio): Make constructors for Context, and drop this.
#[cfg(feature = "servo")]
pub cached_system_font: Option<()>,
/// A font metrics provider, used to access font metrics to implement
/// font-relative units.
pub font_metrics_provider: &'a FontMetricsProvider,
/// Whether or not we are computing the media list in a media query
pub in_media_query: bool,
/// The quirks mode of this context.
pub quirks_mode: QuirksMode,
/// Whether this computation is being done for a SMIL animation.
///
/// This is used to allow certain properties to generate out-of-range
/// values, which SMIL allows.
pub for_smil_animation: bool,
/// The property we are computing a value for, if it is a non-inherited
/// property. None if we are computed a value for an inherited property
/// or not computing for a property at all (e.g. in a media query
/// evaluation).
pub for_non_inherited_property: Option<LonghandId>,
/// The conditions to cache a rule node on the rule cache.
///
/// FIXME(emilio): Drop the refcell.
pub rule_cache_conditions: RefCell<&'a mut RuleCacheConditions>,
}
impl<'a> Context<'a> {
/// Creates a suitable context for media query evaluation, in which
/// font-relative units compute against the system_font, and executes `f`
/// with it.
pub fn for_media_query_evaluation<F, R>(
device: &Device,
quirks_mode: QuirksMode,
f: F,
) -> R
where
F: FnOnce(&Context) -> R
{
let mut conditions = RuleCacheConditions::default();
let default_values = device.default_computed_values();
let provider = get_metrics_provider_for_product();
let context = Context {
is_root_element: false,
builder: StyleBuilder::for_derived_style(device, default_values, None, None),
font_metrics_provider: &provider,
cached_system_font: None,
in_media_query: true,
quirks_mode,
for_smil_animation: false,
for_non_inherited_property: None,
rule_cache_conditions: RefCell::new(&mut conditions),
};
f(&context)
}
/// Whether the current element is the root element.
pub fn is_root_element(&self) -> bool {
self.is_root_element
}
/// The current device.
pub fn device(&self) -> &Device {
self.builder.device
}
/// The current viewport size, used to resolve viewport units.
pub fn viewport_size_for_viewport_unit_resolution(&self) -> Size2D<Au> {
self.builder.device.au_viewport_size_for_viewport_unit_resolution()
}
/// The default computed style we're getting our reset style from.
pub fn default_style(&self) -> &ComputedValues {
self.builder.default_style()
}
/// The current style.
pub fn style(&self) -> &StyleBuilder {
&self.builder
}
/// Apply text-zoom if enabled.
#[cfg(feature = "gecko")]
pub fn maybe_zoom_text(&self, size: NonNegativeLength) -> NonNegativeLength {
// We disable zoom for <svg:text> by unsetting the
// -x-text-zoom property, which leads to a false value
// in mAllowZoom
if self.style().get_font().gecko.mAllowZoom {
self.device().zoom_text(Au::from(size)).into()
} else {
size
}
}
/// (Servo doesn't do text-zoom)
#[cfg(feature = "servo")]
pub fn maybe_zoom_text(&self, size: NonNegativeLength) -> NonNegativeLength {
size
}
}
/// An iterator over a slice of computed values
#[derive(Clone)]
pub struct ComputedVecIter<'a, 'cx, 'cx_a: 'cx, S: ToComputedValue + 'a> {
cx: &'cx Context<'cx_a>,
values: &'a [S],
}
impl<'a, 'cx, 'cx_a: 'cx, S: ToComputedValue + 'a> ComputedVecIter<'a, 'cx, 'cx_a, S> {
/// Construct an iterator from a slice of specified values and a context
pub fn new(cx: &'cx Context<'cx_a>, values: &'a [S]) -> Self {
ComputedVecIter {
cx: cx,
values: values,
}
}
}
impl<'a, 'cx, 'cx_a: 'cx, S: ToComputedValue + 'a> ExactSizeIterator for ComputedVecIter<'a, 'cx, 'cx_a, S> {
fn len(&self) -> usize {
self.values.len()
}
}
impl<'a, 'cx, 'cx_a: 'cx, S: ToComputedValue + 'a> Iterator for ComputedVecIter<'a, 'cx, 'cx_a, S> {
type Item = S::ComputedValue;
fn next(&mut self) -> Option<Self::Item> {
if let Some((next, rest)) = self.values.split_first() {
let ret = next.to_computed_value(self.cx);
self.values = rest;
Some(ret)
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.values.len(), Some(self.values.len()))
}
}
/// A trait to represent the conversion between computed and specified values.
///
/// This trait is derivable with `#[derive(ToComputedValue)]`. The derived
/// implementation just calls `ToComputedValue::to_computed_value` on each field
/// of the passed value, or `Clone::clone` if the field is annotated with
/// `#[compute(clone)]`.
pub trait ToComputedValue {
/// The computed value type we're going to be converted to.
type ComputedValue;
/// Convert a specified value to a computed value, using itself and the data
/// inside the `Context`.
#[inline]
fn to_computed_value(&self, context: &Context) -> Self::ComputedValue;
#[inline]
/// Convert a computed value to specified value form.
///
/// This will be used for recascading during animation.
    /// Values produced by from_computed_value should recompute to the same value.
fn from_computed_value(computed: &Self::ComputedValue) -> Self;
}
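// Illustration (added note, not from the original source): for a hypothetical
// newtype `struct SpecifiedAlpha(CSSFloat);`, `#[derive(ToComputedValue)]`
// would expand to roughly the following, since `CSSFloat` computes to itself
// via the `trivial_to_computed_value!` impls below:
//
//     impl ToComputedValue for SpecifiedAlpha {
//         type ComputedValue = SpecifiedAlpha;
//         fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
//             SpecifiedAlpha(self.0.to_computed_value(context))
//         }
//         fn from_computed_value(computed: &Self::ComputedValue) -> Self {
//             SpecifiedAlpha(ToComputedValue::from_computed_value(&computed.0))
//         }
//     }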
impl<A, B> ToComputedValue for (A, B)
where A: ToComputedValue, B: ToComputedValue,
{
type ComputedValue = (
<A as ToComputedValue>::ComputedValue,
<B as ToComputedValue>::ComputedValue,
);
#[inline]
fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
(self.0.to_computed_value(context), self.1.to_computed_value(context))
}
#[inline]
fn from_computed_value(computed: &Self::ComputedValue) -> Self {
(A::from_computed_value(&computed.0), B::from_computed_value(&computed.1))
}
}
impl<T> ToComputedValue for Option<T>
where T: ToComputedValue
{
type ComputedValue = Option<<T as ToComputedValue>::ComputedValue>;
#[inline]
fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
self.as_ref().map(|item| item.to_computed_value(context))
}
#[inline]
fn from_computed_value(computed: &Self::ComputedValue) -> Self {
computed.as_ref().map(T::from_computed_value)
}
}
impl<T> ToComputedValue for Size2D<T>
where T: ToComputedValue
{
type ComputedValue = Size2D<<T as ToComputedValue>::ComputedValue>;
#[inline]
fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
Size2D::new(
self.width.to_computed_value(context),
self.height.to_computed_value(context),
)
}
#[inline]
fn from_computed_value(computed: &Self::ComputedValue) -> Self {
Size2D::new(
T::from_computed_value(&computed.width),
T::from_computed_value(&computed.height),
)
}
}
impl<T> ToComputedValue for Vec<T>
where T: ToComputedValue
{
type ComputedValue = Vec<<T as ToComputedValue>::ComputedValue>;
#[inline]
fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
self.iter().map(|item| item.to_computed_value(context)).collect()
}
#[inline]
fn from_computed_value(computed: &Self::ComputedValue) -> Self {
computed.iter().map(T::from_computed_value).collect()
}
}
impl<T> ToComputedValue for Box<T>
where T: ToComputedValue
{
type ComputedValue = Box<<T as ToComputedValue>::ComputedValue>;
#[inline]
fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
Box::new(T::to_computed_value(self, context))
}
#[inline]
fn from_computed_value(computed: &Self::ComputedValue) -> Self {
Box::new(T::from_computed_value(computed))
}
}
impl<T> ToComputedValue for Box<[T]>
where T: ToComputedValue
{
type ComputedValue = Box<[<T as ToComputedValue>::ComputedValue]>;
#[inline]
fn to_computed_value(&self, context: &Context) -> Self::ComputedValue {
self.iter().map(|item| item.to_computed_value(context)).collect::<Vec<_>>().into_boxed_slice()
}
#[inline]
fn from_computed_value(computed: &Self::ComputedValue) -> Self {
computed.iter().map(T::from_computed_value).collect::<Vec<_>>().into_boxed_slice()
}
}
trivial_to_computed_value!(());
trivial_to_computed_value!(bool);
trivial_to_computed_value!(f32);
trivial_to_computed_value!(i32);
trivial_to_computed_value!(u8);
trivial_to_computed_value!(u16);
trivial_to_computed_value!(u32);
trivial_to_computed_value!(Atom);
trivial_to_computed_value!(BorderStyle);
trivial_to_computed_value!(Cursor);
trivial_to_computed_value!(Namespace);
trivial_to_computed_value!(String);
/// A `<number>` value.
pub type Number = CSSFloat;
/// A wrapper of Number, but the value >= 0.
pub type NonNegativeNumber = NonNegative<CSSFloat>;
impl From<CSSFloat> for NonNegativeNumber {
#[inline]
fn from(number: CSSFloat) -> NonNegativeNumber {
NonNegative::<CSSFloat>(number)
}
}
impl From<NonNegativeNumber> for CSSFloat {
#[inline]
fn from(number: NonNegativeNumber) -> CSSFloat {
number.0
}
}
/// A wrapper of Number, but the value >= 1.
pub type GreaterThanOrEqualToOneNumber = GreaterThanOrEqualToOne<CSSFloat>;
impl From<CSSFloat> for GreaterThanOrEqualToOneNumber {
#[inline]
fn from(number: CSSFloat) -> GreaterThanOrEqualToOneNumber {
GreaterThanOrEqualToOne::<CSSFloat>(number)
}
}
impl From<GreaterThanOrEqualToOneNumber> for CSSFloat {
#[inline]
fn from(number: GreaterThanOrEqualToOneNumber) -> CSSFloat {
number.0<|fim▁hole|>#[allow(missing_docs)]
#[derive(Clone, ComputeSquaredDistance, Copy, Debug, MallocSizeOf, PartialEq, ToCss)]
pub enum NumberOrPercentage {
Percentage(Percentage),
Number(Number),
}
impl ToComputedValue for specified::NumberOrPercentage {
type ComputedValue = NumberOrPercentage;
#[inline]
fn to_computed_value(&self, context: &Context) -> NumberOrPercentage {
match *self {
specified::NumberOrPercentage::Percentage(percentage) =>
NumberOrPercentage::Percentage(percentage.to_computed_value(context)),
specified::NumberOrPercentage::Number(number) =>
NumberOrPercentage::Number(number.to_computed_value(context)),
}
}
#[inline]
fn from_computed_value(computed: &NumberOrPercentage) -> Self {
match *computed {
NumberOrPercentage::Percentage(percentage) =>
specified::NumberOrPercentage::Percentage(ToComputedValue::from_computed_value(&percentage)),
NumberOrPercentage::Number(number) =>
specified::NumberOrPercentage::Number(ToComputedValue::from_computed_value(&number)),
}
}
}
/// A type used for opacity.
pub type Opacity = CSSFloat;
/// A `<integer>` value.
pub type Integer = CSSInteger;
/// <integer> | auto
pub type IntegerOrAuto = Either<CSSInteger, Auto>;
impl IntegerOrAuto {
/// Returns the integer value if it is an integer, otherwise return
/// the given value.
pub fn integer_or(&self, auto_value: CSSInteger) -> CSSInteger {
match *self {
Either::First(n) => n,
Either::Second(Auto) => auto_value,
}
}
}
/// A wrapper of Integer, but only accept a value >= 1.
pub type PositiveInteger = GreaterThanOrEqualToOne<CSSInteger>;
impl From<CSSInteger> for PositiveInteger {
#[inline]
fn from(int: CSSInteger) -> PositiveInteger {
GreaterThanOrEqualToOne::<CSSInteger>(int)
}
}
/// PositiveInteger | auto
pub type PositiveIntegerOrAuto = Either<PositiveInteger, Auto>;
/// <length> | <percentage> | <number>
pub type LengthOrPercentageOrNumber = Either<Number, LengthOrPercentage>;
/// NonNegativeLengthOrPercentage | NonNegativeNumber
pub type NonNegativeLengthOrPercentageOrNumber = Either<NonNegativeNumber, NonNegativeLengthOrPercentage>;
#[allow(missing_docs)]
#[cfg_attr(feature = "servo", derive(MallocSizeOf))]
#[derive(Clone, ComputeSquaredDistance, Copy, Debug, PartialEq)]
/// A computed cliprect for clip and image-region
pub struct ClipRect {
pub top: Option<Length>,
pub right: Option<Length>,
pub bottom: Option<Length>,
pub left: Option<Length>,
}
impl ToCss for ClipRect {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
dest.write_str("rect(")?;
if let Some(top) = self.top {
top.to_css(dest)?;
dest.write_str(", ")?;
} else {
dest.write_str("auto, ")?;
}
if let Some(right) = self.right {
right.to_css(dest)?;
dest.write_str(", ")?;
} else {
dest.write_str("auto, ")?;
}
if let Some(bottom) = self.bottom {
bottom.to_css(dest)?;
dest.write_str(", ")?;
} else {
dest.write_str("auto, ")?;
}
if let Some(left) = self.left {
left.to_css(dest)?;
} else {
dest.write_str("auto")?;
}
dest.write_str(")")
}
}
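// For example (illustrative values only): a ClipRect with top = Some(1px),
// right = None, bottom = Some(3px), left = None serializes as
// `rect(1px, auto, 3px, auto)` -- each None side falls back to the "auto"
// branch above.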
/// rect(...) | auto
pub type ClipRectOrAuto = Either<ClipRect, Auto>;
/// The computed value of a grid `<track-breadth>`
pub type TrackBreadth = GenericTrackBreadth<LengthOrPercentage>;
/// The computed value of a grid `<track-size>`
pub type TrackSize = GenericTrackSize<LengthOrPercentage>;
/// The computed value of a grid `<track-list>`
/// (could also be `<auto-track-list>` or `<explicit-track-list>`)
pub type TrackList = GenericTrackList<LengthOrPercentage, Integer>;
/// The computed value of a `<grid-line>`.
pub type GridLine = GenericGridLine<Integer>;
/// `<grid-template-rows> | <grid-template-columns>`
pub type GridTemplateComponent = GenericGridTemplateComponent<LengthOrPercentage, Integer>;
impl ClipRectOrAuto {
/// Return an auto (default for clip-rect and image-region) value
pub fn auto() -> Self {
Either::Second(Auto)
}
/// Check if it is auto
pub fn is_auto(&self) -> bool {
match *self {
Either::Second(_) => true,
_ => false
}
}
}
/// <color> | auto
pub type ColorOrAuto = Either<Color, Auto>;
/// The computed value of a CSS `url()`, resolved relative to the stylesheet URL.
#[cfg(feature = "servo")]
#[derive(Clone, Debug, Deserialize, MallocSizeOf, PartialEq, Serialize)]
pub enum ComputedUrl {
/// The `url()` was invalid or it wasn't specified by the user.
Invalid(#[ignore_malloc_size_of = "Arc"] Arc<String>),
/// The resolved `url()` relative to the stylesheet URL.
Valid(ServoUrl),
}
/// TODO: Properly build ComputedUrl for gecko
#[cfg(feature = "gecko")]
pub type ComputedUrl = specified::url::SpecifiedUrl;
#[cfg(feature = "servo")]
impl ComputedUrl {
/// Returns the resolved url if it was valid.
pub fn url(&self) -> Option<&ServoUrl> {
match *self {
ComputedUrl::Valid(ref url) => Some(url),
_ => None,
}
}
}
#[cfg(feature = "servo")]
impl ToCss for ComputedUrl {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
let string = match *self {
ComputedUrl::Valid(ref url) => url.as_str(),
ComputedUrl::Invalid(ref invalid_string) => invalid_string,
};
dest.write_str("url(")?;
string.to_css(dest)?;
dest.write_str(")")
}
}
/// <url> | <none>
pub type UrlOrNone = Either<ComputedUrl, None_>;<|fim▁end|>
|
}
}
|
<|file_name|>view.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
__author__ = '[email protected]'
from collections import OrderedDict
from math import pi
from Products.Five import BrowserView
from plone import api
import base64
import logging
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import six
LOG = logging.getLogger('evaluate')
class UpgradeIt(BrowserView):
def __call__(self):
portal_setup = api.portal.get_tool(name='portal_setup')
portal_setup.runImportStepFromProfile(
'profile-plonetheme.sunburst:default', 'cssregistry', run_dependencies=False)
portal_skins = api.portal.get_tool(name='portal_skins')
custom = portal_skins['custom']
for oid in ['main_template', 'base_properties', 'ploneCustom.css']:
if oid in custom:
api.content.delete(obj=custom[oid])
return "DONE"
class Result(object):
def __init__(self):
self.good = ''
self.details = {}
class EvaluateTestView(BrowserView):
no_text = 'Kein Textbaustein'
factors = {
'Meistens': 5,
'Manchmal': 3,
'Selten': 1,
'Nie': 0
}
pie_factors = {
'Meistens': 3,
'Manchmal': 2,
'Selten': 1,
'Nie': 0
}
chart_img = ''
def get_detail_elements(self):
zope_script = self.context.restrictedTraverse('text_detail_elements')
return zope_script()
def get_summary_elements(self):
zope_script = self.context.restrictedTraverse('text_summary_elements')
return zope_script()
def text_blocks(self):
result = OrderedDict()
form = self.request.form
summary = 0
df = OrderedDict()
elements = self.get_detail_elements()
for i, group in enumerate(elements.keys()):
if group not in form:
continue
group_title = self.context[group].Title()
result[group_title] = Result()
good_values = []
for key, val in form[group].items():
summary += self.factors[val]
element = elements[group].get(key, self.no_text)
title = element.get('Titel', group_title)
if val == 'Meistens':
good_values.append(title)
continue
text = element.get(val)
if not text:
continue
if val in element:
result[group_title].details[title] = text
else:
result[group_title].details[title] = element.get('default')
u_group_title = unicode(group_title, 'utf-8')
if u_group_title not in df:
df[u_group_title] = 0
df[u_group_title] += self.pie_factors[val]
if good_values:
result[group_title].good = ', '.join(good_values)
if not result[group_title].details:
LOG.warn('Details of group {0} are empty!'.format(group))
summary_elements = self.get_summary_elements()
if summary < 75:
result['summary'] = summary_elements['bad']
        elif 75 <= summary < 130:
result['summary'] = summary_elements['med']
else:
result['summary'] = summary_elements['good']
self.chart_img = 'data:image/jpeg;base64, ' + self.get_radar_chart(df)
self.legend = df.keys()
return result
def get_radar_chart(self, df):
LOG.info('{0}'.format(df))
        # number of variables
categories = list(df)
N = len(categories)
<|fim▁hole|> values = df.values()
values.append(values[0])
        # What will be the angle of each axis in the plot? (we divide the plot by the number of variables)
angles = [n / float(N) * 2 * pi for n in range(N)]
angles += angles[:1]
# Initialise the spider plot
fig = plt.figure()
ax = plt.subplot(111, polar=True)
# Draw one axe per variable + add labels labels yet
plt.xticks(angles[:-1], range(1, N+1), color='grey', size=8, rotation='vertical')
# Draw ylabels
ax.set_rlabel_position(0)
plt.yticks([])
plt.ylim(0, min(21, max(values)) + 1)
# Plot data
ax.plot(angles, values, linewidth=1, linestyle='solid')
# Fill area
ax.fill(angles, values, 'b', alpha=0.1)
fig.savefig('test.png')
img = six.BytesIO()
fig.savefig(img, format='png')
img.seek(0)
return base64.b64encode(img.read())<|fim▁end|>
|
# We are going to plot the first line of the data frame.
# But we need to repeat the first value to close the circular graph:
|
<|file_name|>persistence_strategies.py<|end_file_name|><|fim▁begin|>__author__ = 'dkador'
class BasePersistenceStrategy(object):
"""
A persistence strategy is responsible for persisting a given event
somewhere (i.e. directly to Keen, a local cache, a Redis queue, etc.)
"""
def persist(self, event):
"""Persists the given event somewhere.
:param event: the event to persist
"""
raise NotImplementedError()
class DirectPersistenceStrategy(BasePersistenceStrategy):
"""
A persistence strategy that saves directly to Keen and bypasses any local
cache.
"""
def __init__(self, api):
""" Initializer for DirectPersistenceStrategy.
:param api: the Keen Api object used to communicate with the Keen API
"""
super(DirectPersistenceStrategy, self).__init__()<|fim▁hole|> self.api = api
def persist(self, event):
""" Posts the given event directly to the Keen API.
:param event: an Event to persist
"""
self.api.post_event(event)
def batch_persist(self, events):
""" Posts the given events directly to the Keen API.
:param events: a batch of events to persist
"""
self.api.post_events(events)
class RedisPersistenceStrategy(BasePersistenceStrategy):
"""
A persistence strategy that persists events to Redis for later processing.
Not yet implemented.
"""
pass
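    # A hypothetical sketch of what persist() might eventually look like here
    # (assumption only: the `_redis` client, the queue name and the `to_json`
    # call below are invented for illustration and are not part of this library):
    #
    #     def persist(self, event):
    #         self._redis.rpush("keen:event_queue", event.to_json())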
class FilePersistenceStrategy(BasePersistenceStrategy):
"""
A persistence strategy that persists events to the local file system for
later processing.
Not yet implemented.
"""
pass<|fim▁end|>
| |
<|file_name|>Footer.js<|end_file_name|><|fim▁begin|>// @flow
/* **********************************************************
* File: Footer.js
*
* Brief: The react footer component
*
* Authors: Craig Cheney, George Whitfield
*
* 2017.04.27 CC - Document created
*
********************************************************* */
import React, { Component } from 'react';
import { Grid, Col, Row } from 'react-bootstrap';
let mitImagePath = '../resources/img/mitLogo.png';
/* Set mitImagePath to new path */
if (process.resourcesPath !== undefined) {
  mitImagePath = (`${String(process.resourcesPath)}/resources/img/mitLogo.png`);
  // mitImagePath = `${process.resourcesPath}/resources/img/mitLogo.png`;
}
// const nativeImage = require('electron').nativeImage;
// const mitLogoImage = nativeImage.createFromPath(mitImagePath);
const footerStyle = {
position: 'absolute',
right: 0,
bottom: 0,
left: 0,
color: '#9d9d9d',
backgroundColor: '#222',
height: '25px',
textAlign: 'center'
};
const mitLogoStyle = {
height: '20px'<|fim▁hole|>const bilabLogoStyle = {
height: '20px'
};
export default class Footer extends Component<{}> {
render() {
return (
<Grid className='Footer' style={footerStyle} fluid>
<Row>
        <Col xs={4}><img src={mitImagePath || '../resources/img/mitLogo.png'} style={mitLogoStyle} alt='MICA' /></Col>
<Col xs={4}>The MICA Group © 2017</Col>
<Col xs={4}><img src='../resources/img/bilabLogo_white.png' style={bilabLogoStyle} alt='BioInstrumentation Lab' /></Col>
</Row>
</Grid>
);
}
}
/* [] - END OF FILE */<|fim▁end|>
|
};
|
<|file_name|>common.js<|end_file_name|><|fim▁begin|>/*
Copyright (c) 2004-2006, The Dojo Foundation
All Rights Reserved.
Licensed under the Academic Free License version 2.1 or above OR the
modified BSD license. For more information on Dojo licensing, see:
http://dojotoolkit.org/community/licensing.shtml
*/
dojo.provide("dojo.validate.common");
dojo.require("dojo.regexp");
dojo.validate.isText = function(/*String*/value, /*Object?*/flags){
// summary:
	//	Checks if a string has non-whitespace characters.
// Parameters allow you to constrain the length.
//
// value: A string
// flags: {length: Number, minlength: Number, maxlength: Number}
// flags.length If set, checks if there are exactly flags.length number of characters.
// flags.minlength If set, checks if there are at least flags.minlength number of characters.
// flags.maxlength If set, checks if there are at most flags.maxlength number of characters.
flags = (typeof flags == "object") ? flags : {};
// test for text
if(/^\s*$/.test(value)){ return false; } // Boolean
// length tests
if(typeof flags.length == "number" && flags.length != value.length){ return false; } // Boolean
if(typeof flags.minlength == "number" && flags.minlength > value.length){ return false; } // Boolean
if(typeof flags.maxlength == "number" && flags.maxlength < value.length){ return false; } // Boolean
return true; // Boolean
}
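// Illustrative usage (example values added for clarity, not from the original source):
//   dojo.validate.isText("   ");                  // false: only whitespace
//   dojo.validate.isText("abc", {minlength: 2});  // true: 3 characters >= 2
//   dojo.validate.isText("abc", {length: 2});     // false: exactly 2 required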
dojo.validate.isInteger = function(/*String*/value, /*Object?*/flags){
// summary:
// Validates whether a string is in an integer format
//
// value A string
// flags {signed: Boolean|[true,false], separator: String}
// flags.signed The leading plus-or-minus sign. Can be true, false, or [true, false].
// Default is [true, false], (i.e. sign is optional).
// flags.separator The character used as the thousands separator. Default is no separator.
// For more than one symbol use an array, e.g. [",", ""], makes ',' optional.
var re = new RegExp("^" + dojo.regexp.integer(flags) + "$");
return re.test(value); // Boolean
}
dojo.validate.isRealNumber = function(/*String*/value, /*Object?*/flags){
// summary:
// Validates whether a string is a real valued number.
// Format is the usual exponential notation.
//
// value: A string
// flags: {places: Number, decimal: String, exponent: Boolean|[true,false], eSigned: Boolean|[true,false], ...}
// flags.places The integer number of decimal places.
// If not given, the decimal part is optional and the number of places is unlimited.
// flags.decimal The character used for the decimal point. Default is ".".
// flags.exponent Express in exponential notation. Can be true, false, or [true, false].
// Default is [true, false], (i.e. the exponential part is optional).
// flags.eSigned The leading plus-or-minus sign on the exponent. Can be true, false,
// or [true, false]. Default is [true, false], (i.e. sign is optional).
// flags in regexp.integer can be applied.
var re = new RegExp("^" + dojo.regexp.realNumber(flags) + "$");
return re.test(value); // Boolean
}
dojo.validate.isCurrency = function(/*String*/value, /*Object?*/flags){
// summary:
// Validates whether a string denotes a monetary value.
// value: A string
// flags: {signed:Boolean|[true,false], symbol:String, placement:String, separator:String,
// fractional:Boolean|[true,false], decimal:String}
// flags.signed The leading plus-or-minus sign. Can be true, false, or [true, false].
// Default is [true, false], (i.e. sign is optional).
	//	flags.symbol  A currency symbol such as Yen "¥", Pound "£", or the Euro sign "€".
// Default is "$". For more than one symbol use an array, e.g. ["$", ""], makes $ optional.
// flags.placement The symbol can come "before" the number or "after". Default is "before".
// flags.separator The character used as the thousands separator. The default is ",".
// flags.fractional The appropriate number of decimal places for fractional currency (e.g. cents)
// Can be true, false, or [true, false]. Default is [true, false], (i.e. cents are optional).
// flags.decimal The character used for the decimal point. Default is ".".
var re = new RegExp("^" + dojo.regexp.currency(flags) + "$");
return re.test(value); // Boolean
}
dojo.validate.isInRange = function(/*String*/value, /*Object?*/flags){
//summary:
// Validates whether a string denoting an integer,
// real number, or monetary value is between a max and min.
//
// value: A string
// flags: {max:Number, min:Number, decimal:String}
<|fim▁hole|> //stripping the seperator allows NaN to perform as expected, if no separator, we assume ','
//once i18n support is ready for this, instead of assuming, we default to i18n's recommended value
value = value.replace((dojo.lang.has(flags,'separator'))?flags.separator:',','');
if(isNaN(value)){
return false; // Boolean
}
	// assign default values to missing parameters
flags = (typeof flags == "object") ? flags : {};
var max = (typeof flags.max == "number") ? flags.max : Infinity;
var min = (typeof flags.min == "number") ? flags.min : -Infinity;
var dec = (typeof flags.decimal == "string") ? flags.decimal : ".";
// splice out anything not part of a number
var pattern = "[^" + dec + "\\deE+-]";
value = value.replace(RegExp(pattern, "g"), "");
// trim ends of things like e, E, or the decimal character
value = value.replace(/^([+-]?)(\D*)/, "$1");
value = value.replace(/(\D*)$/, "");
// replace decimal with ".". The minus sign '-' could be the decimal!
pattern = "(\\d)[" + dec + "](\\d)";
value = value.replace(RegExp(pattern, "g"), "$1.$2");
value = Number(value);
if ( value < min || value > max ) { return false; } // Boolean
return true; // Boolean
}
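// Illustrative usage (example values only; the ',' separator and '.' decimal
// point are the defaults assumed above):
//   dojo.validate.isInRange("1,500", {min: 0, max: 2000});  // true: parsed as 1500
//   dojo.validate.isInRange("2500.00", {max: 2000});        // false: 2500 > max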
dojo.validate.isNumberFormat = function(/*String*/value, /*Object?*/flags){
// summary:
// Validates any sort of number based format
//
// description:
// Use it for phone numbers, social security numbers, zip-codes, etc.
// The value can be validated against one format or one of multiple formats.
//
// Format
// # Stands for a digit, 0-9.
// ? Stands for an optional digit, 0-9 or nothing.
// All other characters must appear literally in the expression.
//
// Example
// "(###) ###-####" -> (510) 542-9742
// "(###) ###-#### x#???" -> (510) 542-9742 x153
// "###-##-####" -> 506-82-1089 i.e. social security number
// "#####-####" -> 98225-1649 i.e. zip code
//
// value: A string
// flags: {format:String}
// flags.format A string or an Array of strings for multiple formats.
var re = new RegExp("^" + dojo.regexp.numberFormat(flags) + "$", "i");
return re.test(value); // Boolean
}
dojo.validate.isValidLuhn = function(/*String*/value){
//summary: Compares value against the Luhn algorithm to verify its integrity
var sum, parity, curDigit;
if(typeof value!='string'){
value = String(value);
}
value = value.replace(/[- ]/g,''); //ignore dashes and whitespaces
parity = value.length%2;
sum=0;
for(var i=0;i<value.length;i++){
curDigit = parseInt(value.charAt(i));
if(i%2==parity){
curDigit*=2;
}
if(curDigit>9){
curDigit-=9;
}
sum+=curDigit;
}
return !(sum%10);
}
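// Illustrative usage: the well-known Luhn test number passes, while changing
// its final check digit breaks the checksum (dashes and spaces are ignored).
//   dojo.validate.isValidLuhn("4111 1111 1111 1111"); // true
//   dojo.validate.isValidLuhn("4111111111111112");    // false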
/**
Procedural API Description
The main aim is to make input validation expressible in a simple format.
You define profiles which declare the required and optional fields and any constraints they might have.
The results are provided as an object that makes it easy to handle missing and invalid input.
Usage
var results = dojo.validate.check(form, profile);
Profile Object
var profile = {
// filters change the field value and are applied before validation.
trim: ["tx1", "tx2"],
uppercase: ["tx9"],
lowercase: ["tx5", "tx6", "tx7"],
ucfirst: ["tx10"],
digit: ["tx11"],
// required input fields that are blank will be reported missing.
// required radio button groups and drop-down lists with no selection will be reported missing.
// checkbox groups and selectboxes can be required to have more than one value selected.
// List required fields by name and use this notation to require more than one value: {checkboxgroup: 2}, {selectboxname: 3}.
required: ["tx7", "tx8", "pw1", "ta1", "rb1", "rb2", "cb3", "s1", {"doubledip":2}, {"tripledip":3}],
	// dependent/conditional fields are required if the target field is present and not blank.
// At present only textbox, password, and textarea fields are supported.
dependencies: {
cc_exp: "cc_no",
cc_type: "cc_no",
},
// Fields can be validated using any boolean valued function.
// Use arrays to specify parameters in addition to the field value.
constraints: {
field_name1: myValidationFunction,
field_name2: dojo.validate.isInteger,
field_name3: [myValidationFunction, additional parameters],
field_name4: [dojo.validate.isValidDate, "YYYY.MM.DD"],
field_name5: [dojo.validate.isEmailAddress, false, true],
},
// Confirm is a sort of conditional validation.
// It associates each field in its property list with another field whose value should be equal.
// If the values are not equal, the field in the property list is reported as Invalid. Unless the target field is blank.
confirm: {
email_confirm: "email",
pw2: "pw1",
}
};
Results Object
isSuccessful(): Returns true if there were no invalid or missing fields, else it returns false.
hasMissing(): Returns true if the results contain any missing fields.
getMissing(): Returns a list of required fields that have values missing.
isMissing(field): Returns true if the field is required and the value is missing.
hasInvalid(): Returns true if the results contain fields with invalid data.
getInvalid(): Returns a list of fields that have invalid values.
isInvalid(field): Returns true if the field has an invalid value.
*/<|fim▁end|>
|
// flags.max A number, which the value must be less than or equal to for the validation to be true.
// flags.min A number, which the value must be greater than or equal to for the validation to be true.
// flags.decimal The character used for the decimal point. Default is ".".
|
<|file_name|>version.rs<|end_file_name|><|fim▁begin|>use crate::internal::consts;
// ========================================================================= //
/// The CFB format version to use.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum Version {
/// Version 3, which uses 512-byte sectors.
V3,
/// Version 4, which uses 4096-byte sectors.
V4,
}
impl Version {
/// Returns the version enum for the given version number, or `None`.
pub fn from_number(number: u16) -> Option<Version> {
match number {
3 => Some(Version::V3),
4 => Some(Version::V4),
_ => None,
}
}
/// Returns the version number for this version.
pub fn number(self) -> u16 {
match self {
Version::V3 => 3,
Version::V4 => 4,
}
}<|fim▁hole|>
/// Returns the sector shift used in this version.
pub fn sector_shift(self) -> u16 {
match self {
Version::V3 => 9, // 512-byte sectors
Version::V4 => 12, // 4096-byte sectors
}
}
/// Returns the length of sectors used in this version.
///
/// ```
/// use cfb::Version;
/// assert_eq!(Version::V3.sector_len(), 512);
/// assert_eq!(Version::V4.sector_len(), 4096);
/// ```
pub fn sector_len(self) -> usize {
1 << (self.sector_shift() as usize)
}
/// Returns the bitmask used for reading stream lengths in this version.
pub fn stream_len_mask(self) -> u64 {
match self {
Version::V3 => 0xffffffff,
Version::V4 => 0xffffffffffffffff,
}
}
/// Returns the number of directory entries per sector in this version.
pub fn dir_entries_per_sector(self) -> usize {
self.sector_len() / consts::DIR_ENTRY_LEN
}
}
// ========================================================================= //
#[cfg(test)]
mod tests {
use super::Version;
#[test]
fn number_round_trip() {
for &version in &[Version::V3, Version::V4] {
assert_eq!(Version::from_number(version.number()), Some(version));
}
}
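    #[test]
    fn sector_lengths() {
        // Sector sizes documented above: 512-byte sectors for V3, 4096 for V4.
        assert_eq!(Version::V3.sector_len(), 512);
        assert_eq!(Version::V4.sector_len(), 4096);
    }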
}
// ========================================================================= //<|fim▁end|>
| |
<|file_name|>Visitor.java<|end_file_name|><|fim▁begin|>/*
*******************************************************************************
* Copyright (C) 2002-2012, International Business Machines Corporation and *
* others. All Rights Reserved. *
*******************************************************************************
*/
package com.ibm.icu.dev.util;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import com.ibm.icu.text.UnicodeSet;
import com.ibm.icu.text.UnicodeSetIterator;
public abstract class Visitor {
public void doAt(Object item) {
if (item instanceof Collection) {
doAt((Collection) item);
} else if (item instanceof Map) {
doAt((Map) item);
} else if (item instanceof Object[]) {
doAt((Object[]) item);
} else if (item instanceof UnicodeSet) {
doAt((UnicodeSet) item);
} else {
doSimpleAt(item);
}
}
public int count(Object item) {
if (item instanceof Collection) {
return ((Collection) item).size();
} else if (item instanceof Map) {
return ((Map) item).size();
} else if (item instanceof Object[]) {
return ((Object[]) item).length;
} else if (item instanceof UnicodeSet) {
return ((UnicodeSet) item).size();
} else {
return 1;
}
}
// the default implementation boxing
public void doAt(int o) {
doSimpleAt(new Integer(o));
}
public void doAt(double o) {
doSimpleAt(new Double(o));
}
public void doAt(char o) {
doSimpleAt(new Character(o));
}
// for subclassing
protected void doAt (Collection c) {
if (c.size() == 0) doBefore(c, null);
Iterator it = c.iterator();
boolean first = true;
Object last = null;
while (it.hasNext()) {
Object item = it.next();
if (first) {
doBefore(c, item);
first = false;
} else {
doBetween(c, last, item);
}
doAt(last=item);
}
doAfter(c, last);
}
protected void doAt (Map c) {
doAt(c.entrySet());
}
protected void doAt (UnicodeSet c) {
if (c.size() == 0) doBefore(c, null);
UnicodeSetIterator it = new UnicodeSetIterator(c);
boolean first = true;
Object last = null;
Object item;
CodePointRange cpr0 = new CodePointRange();
CodePointRange cpr1 = new CodePointRange();
CodePointRange cpr;
while(it.nextRange()) {
if (it.codepoint == UnicodeSetIterator.IS_STRING) {
item = it.string;
} else {
cpr = last == cpr0 ? cpr1 : cpr0; // make sure we don't override last
cpr.codepoint = it.codepoint;
cpr.codepointEnd = it.codepointEnd;
item = cpr;
}
            if (first) {
                doBefore(c, item);
                first = false;
} else {
doBetween(c, last, item);
}
doAt(last = item);
}
doAfter(c, last);
}
protected void doAt (Object[] c) {
doBefore(c, c.length == 0 ? null : c[0]);
Object last = null;
for (int i = 0; i < c.length; ++i) {
if (i != 0) doBetween(c, last, c[i]);<|fim▁hole|> }
doAfter(c, last);
}
public static class CodePointRange{
public int codepoint, codepointEnd;
}
// ===== MUST BE OVERRIDEN =====
abstract protected void doBefore(Object container, Object item);
abstract protected void doBetween(Object container, Object lastItem, Object nextItem);
abstract protected void doAfter(Object container, Object item);
abstract protected void doSimpleAt(Object o);
}<|fim▁end|>
|
doAt(last = c[i]);
|
<|file_name|>object-does-not-impl-trait.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.<|fim▁hole|>// Test that an object type `@Foo` is not considered to implement the
// trait `Foo`. Issue #5087.
trait Foo {}
fn take_foo<F:Foo>(f: F) {}
fn take_object(f: @Foo) { take_foo(f); } //~ ERROR failed to find an implementation of trait
fn main() {}<|fim▁end|>
| |
<|file_name|>AvgAggFunction.java<|end_file_name|><|fim▁begin|>/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.functions.aggfunctions;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.expressions.Expression;
import org.apache.flink.table.expressions.UnresolvedCallExpression;
import org.apache.flink.table.expressions.UnresolvedReferenceExpression;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.DecimalType;
import org.apache.flink.table.types.logical.utils.LogicalTypeMerging;
import java.math.BigDecimal;
import static org.apache.flink.table.expressions.ApiExpressionUtils.unresolvedRef;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.cast;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.div;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.equalTo;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.ifThenElse;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.isNull;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.literal;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.minus;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.nullOf;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.plus;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.typeLiteral;
/** built-in avg aggregate function. */
public abstract class AvgAggFunction extends DeclarativeAggregateFunction {
private UnresolvedReferenceExpression sum = unresolvedRef("sum");
private UnresolvedReferenceExpression count = unresolvedRef("count");
public abstract DataType getSumType();
@Override
public int operandCount() {
return 1;
}
@Override
public UnresolvedReferenceExpression[] aggBufferAttributes() {
return new UnresolvedReferenceExpression[] {sum, count};
}
@Override<|fim▁hole|> @Override
public Expression[] initialValuesExpressions() {
return new Expression[] {
/* sum = */ literal(0L, getSumType().notNull()), /* count = */ literal(0L)
};
}
@Override
public Expression[] accumulateExpressions() {
return new Expression[] {
/* sum = */ adjustSumType(ifThenElse(isNull(operand(0)), sum, plus(sum, operand(0)))),
/* count = */ ifThenElse(isNull(operand(0)), count, plus(count, literal(1L))),
};
}
@Override
public Expression[] retractExpressions() {
return new Expression[] {
/* sum = */ adjustSumType(ifThenElse(isNull(operand(0)), sum, minus(sum, operand(0)))),
/* count = */ ifThenElse(isNull(operand(0)), count, minus(count, literal(1L))),
};
}
@Override
public Expression[] mergeExpressions() {
return new Expression[] {
/* sum = */ adjustSumType(plus(sum, mergeOperand(sum))),
/* count = */ plus(count, mergeOperand(count))
};
}
private UnresolvedCallExpression adjustSumType(UnresolvedCallExpression sumExpr) {
return cast(sumExpr, typeLiteral(getSumType()));
}
/** If all input are nulls, count will be 0 and we will get null after the division. */
@Override
public Expression getValueExpression() {
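        // Illustration (added comment): this builds the expression
        // CASE WHEN count = 0 THEN NULL ELSE CAST(sum / count AS <result type>) END,
        // so e.g. rows {1, 2, 3} yield 6 / 3 = 2, while an empty group yields NULL.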
Expression ifTrue = nullOf(getResultType());
Expression ifFalse = cast(div(sum, count), typeLiteral(getResultType()));
return ifThenElse(equalTo(count, literal(0L)), ifTrue, ifFalse);
}
/** Built-in Byte Avg aggregate function. */
public static class ByteAvgAggFunction extends AvgAggFunction {
@Override
public DataType getResultType() {
return DataTypes.TINYINT();
}
@Override
public DataType getSumType() {
return DataTypes.BIGINT();
}
}
/** Built-in Short Avg aggregate function. */
public static class ShortAvgAggFunction extends AvgAggFunction {
@Override
public DataType getResultType() {
return DataTypes.SMALLINT();
}
@Override
public DataType getSumType() {
return DataTypes.BIGINT();
}
}
/** Built-in Integer Avg aggregate function. */
public static class IntAvgAggFunction extends AvgAggFunction {
@Override
public DataType getResultType() {
return DataTypes.INT();
}
@Override
public DataType getSumType() {
return DataTypes.BIGINT();
}
}
/** Built-in Long Avg aggregate function. */
public static class LongAvgAggFunction extends AvgAggFunction {
@Override
public DataType getResultType() {
return DataTypes.BIGINT();
}
@Override
public DataType getSumType() {
return DataTypes.BIGINT();
}
}
/** Built-in Float Avg aggregate function. */
public static class FloatAvgAggFunction extends AvgAggFunction {
@Override
public DataType getResultType() {
return DataTypes.FLOAT();
}
@Override
public DataType getSumType() {
return DataTypes.DOUBLE();
}
@Override
public Expression[] initialValuesExpressions() {
return new Expression[] {literal(0D), literal(0L)};
}
}
/** Built-in Double Avg aggregate function. */
public static class DoubleAvgAggFunction extends AvgAggFunction {
@Override
public DataType getResultType() {
return DataTypes.DOUBLE();
}
@Override
public DataType getSumType() {
return DataTypes.DOUBLE();
}
@Override
public Expression[] initialValuesExpressions() {
return new Expression[] {literal(0D), literal(0L)};
}
}
/** Built-in Decimal Avg aggregate function. */
public static class DecimalAvgAggFunction extends AvgAggFunction {
private final DecimalType type;
public DecimalAvgAggFunction(DecimalType type) {
this.type = type;
}
@Override
public DataType getResultType() {
DecimalType t = (DecimalType) LogicalTypeMerging.findAvgAggType(type);
return DataTypes.DECIMAL(t.getPrecision(), t.getScale());
}
@Override
public DataType getSumType() {
DecimalType t = (DecimalType) LogicalTypeMerging.findSumAggType(type);
return DataTypes.DECIMAL(t.getPrecision(), t.getScale());
}
@Override
public Expression[] initialValuesExpressions() {
return new Expression[] {literal(BigDecimal.ZERO, getSumType().notNull()), literal(0L)};
}
}
}<|fim▁end|>
|
public DataType[] getAggBufferTypes() {
return new DataType[] {getSumType(), DataTypes.BIGINT()};
}
|
<|file_name|>sf_tuto.py<|end_file_name|><|fim▁begin|>import time<|fim▁hole|>import camkifu.stone
from camkifu.core import imgutil
class StonesFinderTuto(camkifu.stone.StonesFinder):
""" This class has been used to write a tutorial on how to create a new StonesFinder.
Run Camkifu with this class as the default StonesFinder in order to replay one step of the tuto.
In order to select the step, rename the desired method below to '_find(...)' .
"""
def __init__(self, vmanager):
super().__init__(vmanager)
self.canvas = None
def _learn(self):
pass
# ------------------------------------------------------
#
# TUTORIAL STEPS
#
# ------------------------------------------------------
def _find_minimal(self, goban_img):
""" Implementation 1 of _find() from the tutorial.
"""
imgutil.draw_str(goban_img, "Hello stones finding tutorial !")
self._show(goban_img)
def _find_suggest(self, _):
""" Implementation 2 of _find() from the tutorial.
"""
# check emptiness to avoid complaints since this method will be called in a loop
if self.is_empty(2, 12):
# using "numpy" coordinates frame for x and y
self.suggest(B, 2, 12)
def _find_bulk(self, _):
""" Implementation 3 of _find() from the tutorial.
"""
# using "numpy" coordinates frame for x and y
        whites = ((W, 8, 8), (W, 8, 10), (W, 10, 8), (W, 10, 10))
        blacks = ((B, 7, 7), (B, 7, 11), (B, 11, 7), (B, 11, 11), (B, 9, 9))
        add = whites if self.total_f_processed % 2 else blacks
        rem = blacks if self.total_f_processed % 2 else whites
moves = []
for color, r, c in add:
moves.append((color, r, c))
for _, r, c in rem:
if not self.is_empty(r, c):
moves.append((E, r, c))
time.sleep(0.7)
self.bulk_update(moves)
def _find_getrect(self, goban_img):
""" Implementation 4 of _find() from the tutorial.
"""
canvas = numpy.zeros_like(goban_img)
for r in range(gsize): # row index
for c in range(gsize): # column index
if r == c or r == gsize - c - 1:
x0, y0, x1, y1 = self.getrect(r, c)
canvas[x0:x1, y0:y1] = goban_img[x0:x1, y0:y1]
self._show(canvas)
def _find_border(self, goban_img):
""" Implementation 5 of _find() from the tutorial.
"""
canvas = numpy.zeros_like(goban_img)
for r, c in self._empties_border(2): # 2 is the line height as in go vocabulary (0-based)
x0, y0, x1, y1 = self.getrect(r, c)
canvas[x0:x1, y0:y1] = goban_img[x0:x1, y0:y1]
self._show(canvas)
def _find_spiral(self, goban_img):
""" Implementation 6 of _find() from the tutorial.
"""
count = 0
if self.canvas is None:
self.canvas = numpy.zeros_like(goban_img)
for r, c in self._empties_spiral():
if count == self.total_f_processed % gsize ** 2:
x0, y0, x1, y1 = self.getrect(r, c)
self.canvas[x0:x1, y0:y1] = goban_img[x0:x1, y0:y1]
break
count += 1
self.last_shown = 0 # force display of all images
self._show(self.canvas)<|fim▁end|>
|
import numpy
from golib.config.golib_conf import gsize, B, W, E
|
<|file_name|>format.py<|end_file_name|><|fim▁begin|>"""Definitions for output formats."""
import collections
from enum import Enum, unique
__copyright__ = 'Copyright 2021, 3Liz'
__license__ = 'GPL version 3'
__email__ = '[email protected]'
format_output = collections.namedtuple('format', ['label', 'driver_name', 'extension'])
@unique
class Format(Enum):
""" Name of output formats."""
GeoJSON = format_output('GeoJSON', 'GeoJSON', 'geojson')
"""GeoJSON"""<|fim▁hole|> Shapefile = format_output('ESRI Shapefile', 'ESRI Shapefile', 'shp')
"""Shapefile"""
Kml = format_output('Kml', 'KML', 'kml')
"""Kml"""<|fim▁end|>
|
GeoPackage = format_output('GeoPackage', 'GPKG', 'gpkg')
"""GeoPackage"""
|
<|file_name|>jquery.ui.datepicker-ms.js<|end_file_name|><|fim▁begin|>/* Malaysian initialisation for the jQuery UI date picker plugin. */
/* Written by Mohd Nawawi Mohamad Jamili ([email protected]). */
jQuery(function ($) {
$.datepicker.regional['ms'] = {
closeText: 'Tutup',
prevText: '<Sebelum',
nextText: 'Selepas>',
currentText: 'hari ini',
monthNames: ['Januari', 'Februari', 'Mac', 'April', 'Mei', 'Jun',
'Julai', 'Ogos', 'September', 'Oktober', 'November', 'Disember'],
monthNamesShort: ['Jan', 'Feb', 'Mac', 'Apr', 'Mei', 'Jun',
'Jul', 'Ogo', 'Sep', 'Okt', 'Nov', 'Dis'],
dayNames: ['Ahad', 'Isnin', 'Selasa', 'Rabu', 'Khamis', 'Jumaat', 'Sabtu'],<|fim▁hole|>
dayNamesShort: ['Aha', 'Isn', 'Sel', 'Rab', 'Kha', 'Jum', 'Sab'],
dayNamesMin: ['Ah', 'Is', 'Se', 'Ra', 'Kh', 'Ju', 'Sa'],
weekHeader: 'Mg',
dateFormat: 'dd/mm/yy',
firstDay: 0,
isRTL: false,
showMonthAfterYear: false,
yearSuffix: ''};
$.datepicker.setDefaults($.datepicker.regional['ms']);
});<|fim▁end|>
| |
<|file_name|>descriptor.js<|end_file_name|><|fim▁begin|>// Copyright (C) 2017 Mozilla Corporation. All rights reserved.<|fim▁hole|>// This code is governed by the license found in the LICENSE file.
/*---
esid: sec-atomics.islockfree
description: Testing descriptor property of Atomics.add
includes: [propertyHelper.js]
features: [Atomics]
---*/
verifyProperty(Atomics, 'add', {
enumerable: false,
writable: true,
configurable: true,
});<|fim▁end|>
| |
<|file_name|>taggerscript.js<|end_file_name|><|fim▁begin|>/*
Language: Tagger Script
Author: Philipp Wolfer <[email protected]>
Description: Syntax Highlighting for the Tagger Script as used by MusicBrainz Picard.
Website: https://picard.musicbrainz.org<|fim▁hole|>function(hljs) {
var COMMENT = {
className: 'comment',
begin: /\$noop\(/,
end: /\)/,
contains: [{
begin: /\(/,
end: /\)/,
contains: ['self', {
begin: /\\./
}]
}],
relevance: 10
};
var FUNCTION = {
className: 'keyword',
begin: /\$(?!noop)[a-zA-Z][_a-zA-Z0-9]*/,
end: /\(/,
excludeEnd: true
};
var VARIABLE = {
className: 'variable',
begin: /%[_a-zA-Z0-9:]*/,
end: '%'
};
var ESCAPE_SEQUENCE = {
className: 'symbol',
begin: /\\./
};
return {
contains: [
COMMENT,
FUNCTION,
VARIABLE,
ESCAPE_SEQUENCE
]
};
}<|fim▁end|>
|
*/
|
<|file_name|>0007_create_allocation_strategy_and_behaviors.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def do_nothing(apps, schema_editor):
pass
def create_new_behaviors_and_strategies(apps, schema_editor):
CountingBehavior = apps.get_model("core", "CountingBehavior")
RefreshBehavior = apps.get_model("core", "RefreshBehavior")
RulesBehavior = apps.get_model("core", "RulesBehavior")
Provider = apps.get_model("core", "Provider")
AllocationStrategy = apps.get_model("core", "AllocationStrategy")
# Strategy #1 - Count from first to end of month, refresh on the first
counting_strategy_1, _ = CountingBehavior.objects.get_or_create(
name="1 Month - Calendar Window")
refresh_strategy_1, _ = RefreshBehavior.objects.get_or_create(
name="First of the Month")
# Strategy #2 - Count UP for one month, starting at (& refreshing at) the
# anniversary
counting_strategy_2, _ = CountingBehavior.objects.get_or_create(
name="1 Month - Calendar Window - Anniversary")
refresh_strategy_2, _ = RefreshBehavior.objects.get_or_create(
name="Anniversary Date")
# Rules that will be applied by default
rules = []
rule, _ = RulesBehavior.objects.get_or_create(
name="Ignore non-active status")
rules.append(rule)
rule, _ = RulesBehavior.objects.get_or_create(name="Multiply by Size CPU")
rules.append(rule)
for provider in Provider.objects.all():
new_strategy, _ = AllocationStrategy.objects.get_or_create(
provider=provider, counting_behavior=counting_strategy_1)
new_strategy.refresh_behaviors.add(refresh_strategy_1)
for rule in rules:
new_strategy.rules_behaviors.add(rule)
return
class Migration(migrations.Migration):
dependencies = [
('core', '0006_change_fields_as_not_null'),
]
operations = [
migrations.CreateModel(
name='AllocationStrategy', fields=[
('id', models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ], options={<|fim▁hole|> ('id', models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(
max_length=255)), ], options={
'db_table': 'counting_behavior', }, bases=(
models.Model,), ), migrations.CreateModel(
name='RefreshBehavior', fields=[
('id', models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(
max_length=255)), ], options={
'db_table': 'refresh_behavior', }, bases=(
models.Model,), ), migrations.CreateModel(
name='RulesBehavior', fields=[
('id', models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(
max_length=255)), ], options={
'db_table': 'rules_behavior', }, bases=(
models.Model,), ), migrations.AddField(
model_name='allocationstrategy', name='counting_behavior', field=models.ForeignKey(
to='core.CountingBehavior'), preserve_default=True, ), migrations.AddField(
model_name='allocationstrategy', name='provider', field=models.OneToOneField(
to='core.Provider'), preserve_default=True, ), migrations.AddField(
model_name='allocationstrategy', name='refresh_behaviors', field=models.ManyToManyField(
to='core.RefreshBehavior', null=True, blank=True), preserve_default=True, ), migrations.AddField(
model_name='allocationstrategy', name='rules_behaviors', field=models.ManyToManyField(
to='core.RulesBehavior', null=True, blank=True), preserve_default=True, ), migrations.RunPython(
create_new_behaviors_and_strategies, do_nothing)]<|fim▁end|>
|
'db_table': 'allocation_strategy', }, bases=(
models.Model,), ), migrations.CreateModel(
name='CountingBehavior', fields=[
|
<|file_name|>main.py<|end_file_name|><|fim▁begin|>import argparse
from PGEnv import PGEnvironment
from PGAgent import PGAgent
if __name__ == '__main__':
parser = argparse.ArgumentParser()<|fim▁hole|> parser.add_argument('--gym_environment', type=str, default='Pong-v0',
help='OpenAI Gym Environment to be used (default to Pong-v0)')
parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'],
help='running mode (default to train)')
parser.add_argument('--use_gpu', type=bool, default=False,
help='whether to use GPU (default to False)')
parser.add_argument('--gpu_id', type=int, default=0,
help='the id of the GPU to be used (default to 0)')
parser.add_argument('--model_save_path', type=str, default='./model/PG_model.ckpt',
help='path to save/load the model for training/testing (default to model/PG_model.ckpt)')
parser.add_argument('--check_point', type=int, default=None,
help='index of the ckeck point (default to None)')
parser.add_argument('--model_save_freq', type=int, default=100,
help='dump model at every k-th iteration (default to 100)')
parser.add_argument('--display', type=bool, default=False,
help='whether to render to result. (default to False)')
args = parser.parse_args()
if args.mode == 'train':
env = PGEnvironment(environment_name=args.gym_environment, display=args.display)
agent = PGAgent(env)
assert(args.model_save_path is not None)
agent.learn(model_save_frequency=args.model_save_freq, model_save_path=args.model_save_path, check_point = args.check_point,
use_gpu=args.use_gpu, gpu_id=args.gpu_id)
else:
# disable frame skipping during testing result in better performance (because the agent can take more actions)
env = PGEnvironment(environment_name=args.gym_environment, display=args.display, frame_skipping=False)
agent = PGAgent(env)
assert(args.check_point is not None)
agent.test(model_save_path = args.model_save_path, check_point=args.check_point,
use_gpu=args.use_gpu, gpu_id=args.gpu_id)
print('finished.')<|fim▁end|>
| |
<|file_name|>fabfile.py<|end_file_name|><|fim▁begin|>from fabric import api as fab
from contextlib import contextmanager<|fim▁hole|>@contextmanager
def with_vagrant():
with fab.settings(user="vagrant",host_string="127.0.0.1:2222",key_filename=".vagrant/machines/minecraft/virtualbox/private_key"):
yield
def ping(ip):
with with_vagrant():
return fab.run('ping -c 4 {}'.format(ip))
def save():
with with_vagrant():
fab.sudo('/etc/init.d/minecraft backup')
fab.get(remote_path='/srv/minecraft-server/backups/*', local_path="/Users/e003070/Dropbox/minecraft_backups")
def restore():
with with_vagrant():
fab.put(remote_path='/srv/minecraft-server/backups/', local_path="/Users/e003070/Dropbox/minecraft_backups")<|fim▁end|>
| |
<|file_name|>cell_range.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Noise-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software<|fim▁hole|>// limitations under the License.
//! An example of using cell range noise
extern crate noise;
use noise::{cell2_range, cell3_range, cell4_range, Seed, Point2};
mod debug;
fn main() {
debug::render_png("cell2_range.png", &Seed::new(0), 1024, 1024, scaled_cell2_range);
debug::render_png("cell3_range.png", &Seed::new(0), 1024, 1024, scaled_cell3_range);
debug::render_png("cell4_range.png", &Seed::new(0), 1024, 1024, scaled_cell4_range);
println!("\nGenerated cell2_range.png, cell3_range.png and cell4_range.png");
}
fn scaled_cell2_range(seed: &Seed, point: &Point2<f64>) -> f64 {
cell2_range(seed, &[point[0] / 16.0, point[1] / 16.0]) * 2.0 - 1.0
}
fn scaled_cell3_range(seed: &Seed, point: &Point2<f64>) -> f64 {
cell3_range(seed, &[point[0] / 16.0, point[1] / 16.0, point[0] / 32.0]) * 2.0 - 1.0
}
fn scaled_cell4_range(seed: &Seed, point: &Point2<f64>) -> f64 {
cell4_range(seed, &[point[0] / 16.0, point[1] / 16.0, point[0] / 32.0, point[1] / 32.0]) * 2.0 - 1.0
}<|fim▁end|>
|
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
|
<|file_name|>surface.rs<|end_file_name|><|fim▁begin|>// Copyleft (ↄ) meh. <[email protected]> | http://meh.schizofreni.co
//
// This file is part of cancer.
//
// cancer is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// cancer is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with cancer. If not, see <http://www.gnu.org/licenses/>.<|fim▁hole|>use ffi::cairo::*;
use ffi::cairo::platform::*;
#[derive(Debug)]
pub struct Surface(pub *mut cairo_surface_t);
unsafe impl Send for Surface { }
impl Surface {
pub fn flush(&self) {
unsafe {
cairo_surface_flush(self.0);
}
}
}
#[cfg(all(unix, not(target_os = "macos")))]
impl Surface {
pub fn new(connection: &xcb::Connection, drawable: xcb::Drawable, visual: xcb::Visualtype, width: u32, height: u32) -> Self {
unsafe {
Surface(cairo_xcb_surface_create(connection.get_raw_conn(), drawable, &visual.base, width as c_int, height as c_int))
}
}
}
#[cfg(target_os = "macos")]
impl Surface {
pub fn new(context: *mut c_void, width: u32, height: u32) -> Self {
unsafe {
CGContextTranslateCTM(context, 0.0, height as CGFloat);
CGContextScaleCTM(context, 1.0, -1.0);
Surface(cairo_quartz_surface_create_for_cg_context(context, width as c_uint, height as c_uint))
}
}
}
impl Drop for Surface {
fn drop(&mut self) {
unsafe {
cairo_surface_destroy(self.0);
}
}
}<|fim▁end|>
| |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>// We only need to import the modules necessary for initial render
import CoreLayout from '../layouts/CoreLayout';
import Home from './Home';
import CounterRoute from './Counter';
/* Note: Instead of using JSX, we recommend using react-router
PlainRoute objects to build route definitions. */
export const createRoutes = () => ({
path : '/',
component : CoreLayout,
indexRoute : Home,
childRoutes : [
CounterRoute
]
});<|fim▁hole|>
/* Note: childRoutes can be chunked or otherwise loaded programmatically
using getChildRoutes with the following signature:
getChildRoutes (location, cb) {
require.ensure([], (require) => {
cb(null, [
// Remove imports!
require('./Counter').default(store)
])
})
}
However, this is not necessary for code-splitting! It simply provides
an API for async route definitions. Your code splitting should occur
inside the route `getComponent` function, since it is only invoked
when the route exists and matches.
*/
export default createRoutes;<|fim▁end|>
| |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>'use strict';
<|fim▁hole|>require('./components/active-link');<|fim▁end|>
|
require('./components/dropdown');
|
<|file_name|>c_form.py<|end_file_name|><|fim▁begin|># Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from frappe import _
from frappe.model.document import Document
class CForm(Document):
def validate(self):
"""Validate invoice that c-form is applicable
and no other c-form is received for that"""
for d in self.get('invoice_details'):
if d.invoice_no:
inv = frappe.db.sql("""select c_form_applicable, c_form_no from
`tabSales Invoice` where name = %s and docstatus = 1""", d.invoice_no)
if inv and inv[0][0] != 'Yes':
frappe.throw("C-form is not applicable for Invoice: %s" % d.invoice_no)
elif inv and inv[0][1] and inv[0][1] != self.name:
frappe.throw("""Invoice %s is tagged in another C-form: %s.
If you want to change C-form no for this invoice,
please remove invoice no from the previous c-form and then try again""" %
(d.invoice_no, inv[0][1]))
elif not inv:
frappe.throw("Row %s: Invoice %s is invalid, it might be cancelled / does not exist. \
Please enter a valid Invoice" % d.idx, d.invoice_no)<|fim▁hole|>
def on_submit(self):
self.set_cform_in_sales_invoices()
def before_cancel(self):
# remove cform reference
frappe.db.sql("""update `tabSales Invoice` set c_form_no=null where c_form_no=%s""", self.name)
def set_cform_in_sales_invoices(self):
inv = [d.invoice_no for d in self.get('invoice_details')]
if inv:
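# Placeholder-expansion sketch: one '%s' per invoice is spliced into the
# IN (...) clause, so the driver escapes every value and only the
# placeholder count, never the values, is interpolated into the SQL string.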
frappe.db.sql("""update `tabSales Invoice` set c_form_no=%s, modified=%s where name in (%s)""" %
('%s', '%s', ', '.join(['%s'] * len(inv))), tuple([self.name, self.modified] + inv))
frappe.db.sql("""update `tabSales Invoice` set c_form_no = null, modified = %s
where name not in (%s) and ifnull(c_form_no, '') = %s""" %
('%s', ', '.join(['%s']*len(inv)), '%s'), tuple([self.modified] + inv + [self.name]))
else:
frappe.throw(_("Please enter atleast 1 invoice in the table"))
def set_total_invoiced_amount(self):
total = sum([flt(d.grand_total) for d in self.get('invoice_details')])
frappe.db.set(self, 'total_invoiced_amount', total)
def get_invoice_details(self, invoice_no):
""" Pull details from invoices for referrence """
if invoice_no:
inv = frappe.db.get_value("Sales Invoice", invoice_no,
["posting_date", "territory", "net_total", "grand_total"], as_dict=True)
return {
'invoice_date' : inv.posting_date,
'territory' : inv.territory,
'net_total' : inv.net_total,
'grand_total' : inv.grand_total
}<|fim▁end|>
|
def on_update(self):
""" Update C-Form No on invoices"""
self.set_total_invoiced_amount()
|
<|file_name|>EmberConsoleNcpValueCommand.java<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2016-2019 by the respective copyright holders.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package com.zsmartsystems.zigbee.console.ember;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import com.zsmartsystems.zigbee.ZigBeeNetworkManager;
import com.zsmartsystems.zigbee.dongle.ember.EmberNcp;
import com.zsmartsystems.zigbee.dongle.ember.ezsp.structure.EzspStatus;
import com.zsmartsystems.zigbee.dongle.ember.ezsp.structure.EzspValueId;
/**
* Reads or writes an NCP {@link EzspValueId}
*
* @author Chris Jackson
*
*/
public class EmberConsoleNcpValueCommand extends EmberConsoleAbstractCommand {
@Override
public String getCommand() {
return "ncpvalue";
}
@Override
public String getDescription() {
return "Read or write an NCP memory value";
}<|fim▁hole|> }
@Override
public String getHelp() {
return "VALUEID is the Ember NCP value enumeration\n" + "VALUE is the value to write\n"
+ "If VALUE is not defined, then the memory will be read.\n"
+ "If no arguments are supplied then all values will be displayed.";
}
@Override
public void process(ZigBeeNetworkManager networkManager, String[] args, PrintStream out)
throws IllegalArgumentException {
if (args.length > 3) {
throw new IllegalArgumentException("Incorrect number of arguments.");
}
EmberNcp ncp = getEmberNcp(networkManager);
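// Branches below: a bare "ncpvalue" dumps every EzspValueId; "ncpvalue
// VALUEID" reads one value; "ncpvalue VALUEID VALUE" writes it (e.g.
// "ncpvalue EZSP_VALUE_APS_FRAME_COUNTER 1000", an illustrative invocation).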
if (args.length == 1) {
Map<EzspValueId, int[]> values = new TreeMap<>();
for (EzspValueId valueId : EzspValueId.values()) {
if (valueId == EzspValueId.UNKNOWN) {
continue;
}
values.put(valueId, ncp.getValue(valueId));
}
for (Entry<EzspValueId, int[]> value : values.entrySet()) {
out.print(String.format("%-50s", value.getKey()));
if (value.getValue() != null) {
out.print(displayValue(value.getKey(), value.getValue()));
}
out.println();
}
return;
}
EzspValueId valueId = EzspValueId.valueOf(args[1].toUpperCase());
if (args.length == 2) {
int[] value = ncp.getValue(valueId);
if (value == null) {
out.println("Error reading Ember NCP value " + valueId.toString());
} else {
out.println("Ember NCP value " + valueId.toString() + " is " + displayValue(valueId, value));
}
} else {
int[] value = parseInput(valueId, Arrays.copyOfRange(args, 2, args.length));
if (value == null) {
throw new IllegalArgumentException("Unable to convert data to value array");
}
EzspStatus response = ncp.setValue(valueId, value);
out.println("Writing Ember NCP value " + valueId.toString() + " was "
+ (response == EzspStatus.EZSP_SUCCESS ? "" : "un") + "successful.");
}
}
private String displayValue(EzspValueId valueId, int[] value) {
StringBuilder builder = new StringBuilder();
switch (valueId) {
default:
boolean first = true;
for (int intVal : value) {
if (!first) {
builder.append(' ');
}
first = false;
builder.append(String.format("%02X", intVal));
}
break;
}
return builder.toString();
}
private int[] parseInput(EzspValueId valueId, String[] args) {
int[] value = null;
switch (valueId) {
case EZSP_VALUE_APS_FRAME_COUNTER:
case EZSP_VALUE_NWK_FRAME_COUNTER:
Long longValue = Long.parseLong(args[0]);
value = new int[4];
// Little-endian byte order; shift before masking so sign extension of the
// 0xFF000000 int literal cannot corrupt the high byte.
value[0] = (int) (longValue & 0xFF);
value[1] = (int) ((longValue >> 8) & 0xFF);
value[2] = (int) ((longValue >> 16) & 0xFF);
value[3] = (int) ((longValue >> 24) & 0xFF);
break;
default:
break;
}
return value;
}
}<|fim▁end|>
|
@Override
public String getSyntax() {
return "[VALUEID] [VALUE]";
|
<|file_name|>nb.js<|end_file_name|><|fim▁begin|>/*! Select2 4.0.0 | https://github.com/select2/select2/blob/master/LICENSE.md */
(function () {
if (jQuery && jQuery.fn && jQuery.fn.select2 && jQuery.fn.select2.amd)var e = jQuery.fn.select2.amd;
return e.define("select2/i18n/nb", [], function () {
return {<|fim▁hole|> var t = e.minimum - e.input.length, n = "Vennligst skriv inn ";
return t > 1 ? n += " flere tegn" : n += " tegn til", n
}, loadingMore: function () {
return "Laster flere resultater…"
}, maximumSelected: function (e) {
return "Du kan velge maks " + e.maximum + " elementer"
}, noResults: function () {
return "Ingen treff"
}, searching: function () {
return "Søker…"
}
}
}), {define: e.define, require: e.require}
})();<|fim▁end|>
|
inputTooLong: function (e) {
var t = e.input.length - e.maximum;
return "Vennligst fjern " + t + " tegn"
}, inputTooShort: function (e) {
|
<|file_name|>test.js<|end_file_name|><|fim▁begin|>'use strict';
module.exports = {
db: 'mongodb://localhost/equinix-test',
port: 3001,
app: {
title: 'Equinix - Test Environment'
},
facebook: {
clientID: process.env.FACEBOOK_ID || 'APP_ID',
clientSecret: process.env.FACEBOOK_SECRET || 'APP_SECRET',
callbackURL: 'http://localhost:3000/auth/facebook/callback'
},
twitter: {
clientID: process.env.TWITTER_KEY || 'CONSUMER_KEY',
clientSecret: process.env.TWITTER_SECRET || 'CONSUMER_SECRET',
callbackURL: 'http://localhost:3000/auth/twitter/callback'
},
google: {
clientID: process.env.GOOGLE_ID || 'APP_ID',
clientSecret: process.env.GOOGLE_SECRET || 'APP_SECRET',
callbackURL: 'http://localhost:3000/auth/google/callback'
},<|fim▁hole|> clientSecret: process.env.LINKEDIN_SECRET || 'APP_SECRET',
callbackURL: 'http://localhost:3000/auth/linkedin/callback'
},
github: {
clientID: process.env.GITHUB_ID || 'APP_ID',
clientSecret: process.env.GITHUB_SECRET || 'APP_SECRET',
callbackURL: 'http://localhost:3000/auth/github/callback'
},
mailer: {
from: process.env.MAILER_FROM || 'MAILER_FROM',
options: {
service: process.env.MAILER_SERVICE_PROVIDER || 'MAILER_SERVICE_PROVIDER',
auth: {
user: process.env.MAILER_EMAIL_ID || 'MAILER_EMAIL_ID',
pass: process.env.MAILER_PASSWORD || 'MAILER_PASSWORD'
}
}
}
};<|fim▁end|>
|
linkedin: {
clientID: process.env.LINKEDIN_ID || 'APP_ID',
|
<|file_name|>fall.py<|end_file_name|><|fim▁begin|>#
# Copyright (C) 2016 Dang Duong
#
# This file is part of Open Tux World.
#
# Open Tux World is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Open Tux World is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Open Tux World. If not, see <http://www.gnu.org/licenses/>.
#
from scripts import common
from mathutils import Vector
logic = common.logic
def main(cont):
own = cont.owner
own.applyForce([0, 0, -10 * own.mass], False)
if own["health"] < 1:
return
own["hit"] = False
own.enableRigidBody()
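# Impact-detection sketch: the object caches its velocity in v_x/v_y/v_z each
# frame; the magnitude of the frame-to-frame delta approximates the collision
# speed used by the damage thresholds below.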
v = Vector((own["v_x"], own["v_y"], own["v_z"]))
dv = Vector(own.worldLinearVelocity) - v
v += dv
speed = common.getDistance([dv.x, dv.y, dv.z])
own["v_x"] = v.x
own["v_y"] = v.y
own["v_z"] = v.z
if speed > common.DANGER_SPEED:
if speed > common.FATAL_SPEED:
own["health"] = 0
else:
own["health"] -= speed * (common.HIGH_DAMAGE_RATE if speed > common.HIGH_DANGER_SPEED else common.DAMAGE_RATE)
own.state = logic.KX_STATE3
elif speed < common.RIGID_SPEED and (cont.sensors["Collision.001"].positive or not own["fall"]):
own.disableRigidBody()<|fim▁hole|><|fim▁end|>
|
own.worldOrientation[2] = [0.0,0.0,1.0]
own.state = logic.KX_STATE2
|
<|file_name|>view.py<|end_file_name|><|fim▁begin|># Copyright (C) 2008, One Laptop Per Child
# Copyright (C) 2009, Tomeu Vizoso
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gettext import gettext as _
from gettext import ngettext
import locale
import logging
from gi.repository import GObject
from gi.repository import Gtk
from sugar3.graphics import style
from sugar3.graphics.icon import Icon, CellRendererIcon
from jarabe.controlpanel.sectionview import SectionView
from jarabe.model.update import updater
from jarabe.model import bundleregistry
_DEBUG_VIEW_ALL = True
class ActivityUpdater(SectionView):
def __init__(self, model, alerts):
SectionView.__init__(self)
self._model = updater.get_instance()
self._id_progresss = self._model.connect('progress',
self.__progress_cb)
self._id_updates = self._model.connect('updates-available',
self.__updates_available_cb)
self._id_error = self._model.connect('error',
self.__error_cb)
self._id_finished = self._model.connect('finished',
self.__finished_cb)
self.set_spacing(style.DEFAULT_SPACING)
self.set_border_width(style.DEFAULT_SPACING * 2)
self._top_label = Gtk.Label()
self._top_label.set_line_wrap(True)
self._top_label.set_justify(Gtk.Justification.LEFT)
self._top_label.props.xalign = 0
self.pack_start(self._top_label, False, True, 0)
self._top_label.show()
separator = Gtk.HSeparator()
self.pack_start(separator, False, True, 0)
separator.show()
self._bottom_label = Gtk.Label()
self._bottom_label.set_line_wrap(True)
self._bottom_label.set_justify(Gtk.Justification.LEFT)
self._bottom_label.props.xalign = 0
self._bottom_label.set_markup(
_('Software updates correct errors, eliminate security '
'vulnerabilities, and provide new features.'))
self.pack_start(self._bottom_label, False, True, 0)
self._bottom_label.show()
self._update_box = None
self._progress_pane = None
state = self._model.get_state()
if state in (updater.STATE_IDLE, updater.STATE_CHECKED):
self._refresh()
elif state in (updater.STATE_CHECKING, updater.STATE_DOWNLOADING,
updater.STATE_UPDATING):
self._switch_to_progress_pane()
self._progress_pane.set_message(_('Update in progress...'))
self.connect('destroy', self.__destroy_cb)
def __destroy_cb(self, widget):
self._model.disconnect(self._id_progresss)
self._model.disconnect(self._id_updates)
self._model.disconnect(self._id_error)
self._model.disconnect(self._id_finished)
self._model.clean()
def _switch_to_update_box(self, updates):
if self._update_box in self.get_children():
return
if self._progress_pane in self.get_children():
self.remove(self._progress_pane)
self._progress_pane = None
if self._update_box is None:
self._update_box = UpdateBox(updates)
self._update_box.refresh_button.connect(
'clicked',
self.__refresh_button_clicked_cb)
self._update_box.install_button.connect(
'clicked',
self.__install_button_clicked_cb)
self.pack_start(self._update_box, expand=True, fill=True, padding=0)
self._update_box.show()
def _switch_to_progress_pane(self):
if self._progress_pane in self.get_children():
return
if self._model.get_state() == updater.STATE_CHECKING:
top_message = _('Checking for updates...')
else:
top_message = _('Installing updates...')
self._top_label.set_markup('<big>%s</big>' % top_message)
if self._update_box in self.get_children():
self.remove(self._update_box)
self._update_box = None
if self._progress_pane is None:
self._progress_pane = ProgressPane()
self._progress_pane.cancel_button.connect(
'clicked',
self.__cancel_button_clicked_cb)
self.pack_start(
self._progress_pane, expand=True, fill=False, padding=0)
self._progress_pane.show()
def _clear_center(self):
if self._progress_pane in self.get_children():
self.remove(self._progress_pane)
self._progress_pane = None
if self._update_box in self.get_children():
self.remove(self._update_box)
self._update_box = None
def __progress_cb(self, model, state, bundle_name, progress):
if state == updater.STATE_CHECKING:
if bundle_name:
message = _('Checking %s...') % bundle_name
else:
message = _('Looking for updates...')
elif state == updater.STATE_DOWNLOADING:
message = _('Downloading %s...') % bundle_name
elif state == updater.STATE_UPDATING:
message = _('Updating %s...') % bundle_name
self._switch_to_progress_pane()
self._progress_pane.set_message(message)
self._progress_pane.set_progress(progress)
def __updates_available_cb(self, model, updates):
logging.debug('ActivityUpdater.__updates_available_cb')
available_updates = len(updates)
if not available_updates:
top_message = _('Your software is up-to-date')
else:
top_message = ngettext('You can install %s update',
'You can install %s updates',
available_updates)
top_message = top_message % available_updates
top_message = GObject.markup_escape_text(top_message)
self._top_label.set_markup('<big>%s</big>' % top_message)
if not available_updates:
self._clear_center()
else:
self._switch_to_update_box(updates)
def __error_cb(self, model, updates):
logging.debug('ActivityUpdater.__error_cb')
top_message = _('Can\'t connect to the activity server')
self._top_label.set_markup('<big>%s</big>' % top_message)
self._bottom_label.set_markup(
_('Verify your connection to the internet and try again, '
'or try again later'))
self._clear_center()
def __refresh_button_clicked_cb(self, button):
self._refresh()
def _refresh(self):
self._model.check_updates()
def __install_button_clicked_cb(self, button):
self._model.update(self._update_box.get_bundles_to_update())
def __cancel_button_clicked_cb(self, button):
self._model.cancel()
def __finished_cb(self, model, installed_updates, failed_updates,
cancelled):
num_installed = len(installed_updates)
logging.debug('ActivityUpdater.__finished_cb')
top_message = ngettext('%s update was installed',
'%s updates were installed', num_installed)
top_message = top_message % num_installed
top_message = GObject.markup_escape_text(top_message)
self._top_label.set_markup('<big>%s</big>' % top_message)
self._clear_center()
def undo(self):
self._model.cancel()
class ProgressPane(Gtk.VBox):
"""Container which replaces the `ActivityPane` during refresh or
install."""
def __init__(self):
Gtk.VBox.__init__(self)
self.set_spacing(style.DEFAULT_PADDING)
self.set_border_width(style.DEFAULT_SPACING * 2)
self._progress = Gtk.ProgressBar()
self.pack_start(self._progress, True, True, 0)
self._progress.show()
self._label = Gtk.Label()
self._label.set_line_wrap(True)
self._label.set_property('xalign', 0.5)
self._label.modify_fg(Gtk.StateType.NORMAL,
style.COLOR_BUTTON_GREY.get_gdk_color())
self.pack_start(self._label, True, True, 0)
self._label.show()
alignment_box = Gtk.Alignment.new(xalign=0.5, yalign=0.5,
xscale=0, yscale=0)
self.pack_start(alignment_box, True, True, 0)
alignment_box.show()
self.cancel_button = Gtk.Button(stock=Gtk.STOCK_CANCEL)
alignment_box.add(self.cancel_button)
self.cancel_button.show()
def set_message(self, message):
self._label.set_text(message)
def set_progress(self, fraction):
self._progress.props.fraction = fraction
class UpdateBox(Gtk.VBox):
def __init__(self, updates):
Gtk.VBox.__init__(self)
self.set_spacing(style.DEFAULT_PADDING)
scrolled_window = Gtk.ScrolledWindow()
scrolled_window.set_policy(
Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
self.pack_start(scrolled_window, True, True, 0)
scrolled_window.show()
self._update_list = UpdateList(updates)
self._update_list.props.model.connect('row-changed',
self.__row_changed_cb)
scrolled_window.add(self._update_list)
self._update_list.show()
bottom_box = Gtk.HBox()
bottom_box.set_spacing(style.DEFAULT_SPACING)
self.pack_start(bottom_box, False, True, 0)
bottom_box.show()
self._size_label = Gtk.Label()
self._size_label.props.xalign = 0
self._size_label.set_justify(Gtk.Justification.LEFT)
bottom_box.pack_start(self._size_label, True, True, 0)
self._size_label.show()
self.refresh_button = Gtk.Button(stock=Gtk.STOCK_REFRESH)
bottom_box.pack_start(self.refresh_button, False, True, 0)
self.refresh_button.show()
self.install_button = Gtk.Button(_('Install selected'))
self.install_button.props.image = Icon(
icon_name='emblem-downloads',
pixel_size=style.SMALL_ICON_SIZE)
bottom_box.pack_start(self.install_button, False, True, 0)
self.install_button.show()
self._update_total_size_label()
def __row_changed_cb(self, list_model, path, iterator):
self._update_total_size_label()
self._update_install_button()
def _update_total_size_label(self):
total_size = 0
for row in self._update_list.props.model:
if row[UpdateListModel.SELECTED]:
total_size += row[UpdateListModel.SIZE]
markup = _('Download size: %s') % _format_size(total_size)
self._size_label.set_markup(markup)
def _update_install_button(self):
for row in self._update_list.props.model:
if row[UpdateListModel.SELECTED]:
self.install_button.props.sensitive = True
return
self.install_button.props.sensitive = False
def get_bundles_to_update(self):
bundles_to_update = []
for row in self._update_list.props.model:
if row[UpdateListModel.SELECTED]:
bundles_to_update.append(row[UpdateListModel.BUNDLE_ID])
return bundles_to_update
<|fim▁hole|>
class UpdateList(Gtk.TreeView):
def __init__(self, updates):
list_model = UpdateListModel(updates)
Gtk.TreeView.__init__(self, list_model)
self.set_reorderable(False)
self.set_enable_search(False)
self.set_headers_visible(False)
toggle_renderer = Gtk.CellRendererToggle()
toggle_renderer.props.activatable = True
toggle_renderer.props.xpad = style.DEFAULT_PADDING
toggle_renderer.props.indicator_size = style.zoom(26)
toggle_renderer.connect('toggled', self.__toggled_cb)
toggle_column = Gtk.TreeViewColumn()
toggle_column.pack_start(toggle_renderer, True)
toggle_column.add_attribute(toggle_renderer, 'active',
UpdateListModel.SELECTED)
self.append_column(toggle_column)
icon_renderer = CellRendererIcon(self)
icon_renderer.props.width = style.STANDARD_ICON_SIZE
icon_renderer.props.height = style.STANDARD_ICON_SIZE
icon_renderer.props.size = style.STANDARD_ICON_SIZE
icon_renderer.props.xpad = style.DEFAULT_PADDING
icon_renderer.props.ypad = style.DEFAULT_PADDING
icon_renderer.props.stroke_color = style.COLOR_TOOLBAR_GREY.get_svg()
icon_renderer.props.fill_color = style.COLOR_TRANSPARENT.get_svg()
icon_column = Gtk.TreeViewColumn()
icon_column.pack_start(icon_renderer, True)
icon_column.add_attribute(icon_renderer, 'file-name',
UpdateListModel.ICON_FILE_NAME)
self.append_column(icon_column)
text_renderer = Gtk.CellRendererText()
description_column = Gtk.TreeViewColumn()
description_column.pack_start(text_renderer, True)
description_column.add_attribute(text_renderer, 'markup',
UpdateListModel.DESCRIPTION)
self.append_column(description_column)
def __toggled_cb(self, cell_renderer, path):
row = self.props.model[path]
row[UpdateListModel.SELECTED] = not row[UpdateListModel.SELECTED]
class UpdateListModel(Gtk.ListStore):
BUNDLE_ID = 0
SELECTED = 1
ICON_FILE_NAME = 2
DESCRIPTION = 3
SIZE = 4
def __init__(self, updates):
Gtk.ListStore.__init__(self, str, bool, str, str, int)
registry = bundleregistry.get_registry()
for bundle_update in updates:
installed = registry.get_bundle(bundle_update.bundle_id)
row = [None] * 5
row[self.BUNDLE_ID] = bundle_update.bundle_id
row[self.SELECTED] = True
if installed:
row[self.ICON_FILE_NAME] = installed.get_icon()
else:
if bundle_update.icon_file_name is not None:
row[self.ICON_FILE_NAME] = bundle_update.icon_file_name
if installed:
details = _('From version %(current)s to %(new)s (Size: '
'%(size)s)')
details = details % \
{'current': installed.get_activity_version(),
'new': bundle_update.version,
'size': _format_size(bundle_update.size)}
else:
details = _('Version %(version)s (Size: %(size)s)')
details = details % \
{'version': bundle_update.version,
'size': _format_size(bundle_update.size)}
row[self.DESCRIPTION] = '<b>%s</b>\n%s' % \
(bundle_update.name, details)
row[self.SIZE] = bundle_update.size
self.append(row)
def _format_size(size):
"""Convert a given size in bytes to a nicer better readable unit"""
if size == 0:
# TRANS: download size is 0
return _('None')
elif size < 1024:
# TRANS: download size of very small updates
return _('1 KB')
elif size < 1024 * 1024:
# TRANS: download size of small updates, e.g. '250 KB'
return locale.format_string(_('%.0f KB'), size / 1024.0)
else:
# TRANS: download size of updates, e.g. '2.3 MB'
return locale.format_string(_('%.1f MB'), size / 1024.0 / 1024)<|fim▁end|>
| |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>//! [Flexible target specification.](https://github.com/rust-lang/rfcs/pull/131)
//!
//! Rust targets a wide variety of use cases, and in the interest of flexibility,
//! allows new target triples to be defined in configuration files. Most users
//! will not need to care about these, but this is invaluable when porting Rust
//! to a new platform, and allows for an unprecedented level of control over how
//! the compiler works.
//!
//! # Using custom targets
//!
//! A target triple, as passed via `rustc --target=TRIPLE`, will first be
//! compared against the list of built-in targets. This is to ease distributing
//! rustc (no need for configuration files) and also to hold these built-in
//! targets as immutable and sacred. If `TRIPLE` is not one of the built-in
//! targets, rustc will check if a file named `TRIPLE` exists. If it does, it
//! will be loaded as the target configuration. If the file does not exist,
//! rustc will search each directory in the environment variable
//! `RUST_TARGET_PATH` for a file named `TRIPLE.json`. The first one found will
//! be loaded. If no file is found in any of those directories, a fatal error
//! will be given.
//!
//! Projects defining their own targets should use
//! `--target=path/to/my-awesome-platform.json` instead of adding to
//! `RUST_TARGET_PATH`.
//!
//! # Defining a new target
//!
//! Targets are defined using [JSON](https://json.org/). The `Target` struct in
//! this module defines the format the JSON file should take, though each
//! underscore in the field names should be replaced with a hyphen (`-`) in the
//! JSON file. Some fields are required in every target specification, such as
//! `llvm-target`, `target-endian`, `target-pointer-width`, `data-layout`,
//! `arch`, and `os`. In general, options passed to rustc with `-C` override
//! the target's settings, though `target-feature` and `link-args` will *add*
//! to the list specified by the target, rather than replace.
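//!
//! For illustration, a minimal custom target file could look like the
//! following sketch (the `data-layout` string is a placeholder assumption,
//! not a tested target):
//!
//! ```text
//! {
//!     "llvm-target": "x86_64-unknown-none",
//!     "target-endian": "little",
//!     "target-pointer-width": "64",
//!     "data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128",
//!     "arch": "x86_64",
//!     "os": "none"
//! }
//! ```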
use crate::abi::Endian;
use crate::spec::abi::{lookup as lookup_abi, Abi};
use crate::spec::crt_objects::{CrtObjects, CrtObjectsFallback};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_serialize::json::{Json, ToJson};
use rustc_span::symbol::{sym, Symbol};
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::{fmt, io};
use rustc_macros::HashStable_Generic;
pub mod abi;
pub mod crt_objects;
mod android_base;
mod apple_base;
mod apple_sdk_base;
mod avr_gnu_base;
mod bpf_base;
mod dragonfly_base;
mod freebsd_base;
mod fuchsia_base;
mod haiku_base;
mod hermit_base;
mod hermit_kernel_base;
mod illumos_base;
mod l4re_base;
mod linux_base;
mod linux_gnu_base;
mod linux_kernel_base;
mod linux_musl_base;
mod linux_uclibc_base;
mod msvc_base;
mod netbsd_base;
mod openbsd_base;
mod redox_base;
mod solaris_base;
mod thumb_base;
mod uefi_msvc_base;
mod vxworks_base;
mod wasm_base;
mod windows_gnu_base;
mod windows_msvc_base;
mod windows_uwp_gnu_base;
mod windows_uwp_msvc_base;
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub enum LinkerFlavor {
Em,
Gcc,
Ld,
Msvc,
Lld(LldFlavor),
PtxLinker,
BpfLinker,
}
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub enum LldFlavor {
Wasm,
Ld64,
Ld,
Link,
}
impl LldFlavor {
fn from_str(s: &str) -> Option<Self> {
Some(match s {
"darwin" => LldFlavor::Ld64,
"gnu" => LldFlavor::Ld,
"link" => LldFlavor::Link,
"wasm" => LldFlavor::Wasm,
_ => return None,
})
}
}
impl ToJson for LldFlavor {
fn to_json(&self) -> Json {
match *self {
LldFlavor::Ld64 => "darwin",
LldFlavor::Ld => "gnu",
LldFlavor::Link => "link",
LldFlavor::Wasm => "wasm",
}
.to_json()
}
}
impl ToJson for LinkerFlavor {
fn to_json(&self) -> Json {
self.desc().to_json()
}
}
macro_rules! flavor_mappings {
($((($($flavor:tt)*), $string:expr),)*) => (
impl LinkerFlavor {
pub const fn one_of() -> &'static str {
concat!("one of: ", $($string, " ",)*)
}
pub fn from_str(s: &str) -> Option<Self> {
Some(match s {
$($string => $($flavor)*,)*
_ => return None,
})
}
pub fn desc(&self) -> &str {
match *self {
$($($flavor)* => $string,)*
}
}
}
)
}
flavor_mappings! {
((LinkerFlavor::Em), "em"),
((LinkerFlavor::Gcc), "gcc"),
((LinkerFlavor::Ld), "ld"),
((LinkerFlavor::Msvc), "msvc"),
((LinkerFlavor::PtxLinker), "ptx-linker"),
((LinkerFlavor::BpfLinker), "bpf-linker"),
((LinkerFlavor::Lld(LldFlavor::Wasm)), "wasm-ld"),
((LinkerFlavor::Lld(LldFlavor::Ld64)), "ld64.lld"),
((LinkerFlavor::Lld(LldFlavor::Ld)), "ld.lld"),
((LinkerFlavor::Lld(LldFlavor::Link)), "lld-link"),
}
#[derive(Clone, Copy, Debug, PartialEq, Hash, Encodable, Decodable, HashStable_Generic)]
pub enum PanicStrategy {
Unwind,
Abort,
}
impl PanicStrategy {
pub fn desc(&self) -> &str {
match *self {
PanicStrategy::Unwind => "unwind",
PanicStrategy::Abort => "abort",
}
}
pub fn desc_symbol(&self) -> Symbol {
match *self {
PanicStrategy::Unwind => sym::unwind,
PanicStrategy::Abort => sym::abort,
}
}
}
impl ToJson for PanicStrategy {
fn to_json(&self) -> Json {
match *self {
PanicStrategy::Abort => "abort".to_json(),
PanicStrategy::Unwind => "unwind".to_json(),
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Hash, Encodable, Decodable)]
pub enum RelroLevel {
Full,
Partial,
Off,
None,
}
impl RelroLevel {
pub fn desc(&self) -> &str {
match *self {
RelroLevel::Full => "full",
RelroLevel::Partial => "partial",
RelroLevel::Off => "off",
RelroLevel::None => "none",
}
}
}
impl FromStr for RelroLevel {
type Err = ();
fn from_str(s: &str) -> Result<RelroLevel, ()> {
match s {
"full" => Ok(RelroLevel::Full),
"partial" => Ok(RelroLevel::Partial),
"off" => Ok(RelroLevel::Off),
"none" => Ok(RelroLevel::None),
_ => Err(()),
}
}
}
impl ToJson for RelroLevel {
fn to_json(&self) -> Json {
match *self {
RelroLevel::Full => "full".to_json(),
RelroLevel::Partial => "partial".to_json(),
RelroLevel::Off => "off".to_json(),
RelroLevel::None => "None".to_json(),
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Hash, Encodable, Decodable)]
pub enum MergeFunctions {
Disabled,
Trampolines,
Aliases,
}
impl MergeFunctions {
pub fn desc(&self) -> &str {
match *self {
MergeFunctions::Disabled => "disabled",
MergeFunctions::Trampolines => "trampolines",
MergeFunctions::Aliases => "aliases",
}
}
}
impl FromStr for MergeFunctions {
type Err = ();
fn from_str(s: &str) -> Result<MergeFunctions, ()> {
match s {
"disabled" => Ok(MergeFunctions::Disabled),
"trampolines" => Ok(MergeFunctions::Trampolines),
"aliases" => Ok(MergeFunctions::Aliases),
_ => Err(()),
}
}
}
impl ToJson for MergeFunctions {
fn to_json(&self) -> Json {
match *self {
MergeFunctions::Disabled => "disabled".to_json(),
MergeFunctions::Trampolines => "trampolines".to_json(),
MergeFunctions::Aliases => "aliases".to_json(),
}
}
}
#[derive(Clone, Copy, PartialEq, Hash, Debug)]
pub enum RelocModel {
Static,
Pic,
DynamicNoPic,
Ropi,
Rwpi,
RopiRwpi,
}
impl FromStr for RelocModel {
type Err = ();
fn from_str(s: &str) -> Result<RelocModel, ()> {
Ok(match s {
"static" => RelocModel::Static,
"pic" => RelocModel::Pic,
"dynamic-no-pic" => RelocModel::DynamicNoPic,
"ropi" => RelocModel::Ropi,
"rwpi" => RelocModel::Rwpi,
"ropi-rwpi" => RelocModel::RopiRwpi,
_ => return Err(()),
})
}
}
impl ToJson for RelocModel {
fn to_json(&self) -> Json {
match *self {
RelocModel::Static => "static",
RelocModel::Pic => "pic",
RelocModel::DynamicNoPic => "dynamic-no-pic",
RelocModel::Ropi => "ropi",
RelocModel::Rwpi => "rwpi",
RelocModel::RopiRwpi => "ropi-rwpi",
}
.to_json()
}
}
#[derive(Clone, Copy, PartialEq, Hash, Debug)]
pub enum CodeModel {
Tiny,
Small,
Kernel,
Medium,
Large,
}
impl FromStr for CodeModel {
type Err = ();
fn from_str(s: &str) -> Result<CodeModel, ()> {
Ok(match s {
"tiny" => CodeModel::Tiny,
"small" => CodeModel::Small,
"kernel" => CodeModel::Kernel,
"medium" => CodeModel::Medium,
"large" => CodeModel::Large,
_ => return Err(()),
})
}
}
impl ToJson for CodeModel {
fn to_json(&self) -> Json {
match *self {
CodeModel::Tiny => "tiny",
CodeModel::Small => "small",
CodeModel::Kernel => "kernel",
CodeModel::Medium => "medium",
CodeModel::Large => "large",
}
.to_json()
}
}
#[derive(Clone, Copy, PartialEq, Hash, Debug)]
pub enum TlsModel {
GeneralDynamic,
LocalDynamic,
InitialExec,
LocalExec,
}
impl FromStr for TlsModel {
type Err = ();
fn from_str(s: &str) -> Result<TlsModel, ()> {
Ok(match s {
// Note the "general" vs "global" difference. The model name is "general",
// but the user-facing option name is "global" for consistency with other compilers.
"global-dynamic" => TlsModel::GeneralDynamic,
"local-dynamic" => TlsModel::LocalDynamic,
"initial-exec" => TlsModel::InitialExec,
"local-exec" => TlsModel::LocalExec,
_ => return Err(()),
})
}
}
impl ToJson for TlsModel {
fn to_json(&self) -> Json {
match *self {
TlsModel::GeneralDynamic => "global-dynamic",
TlsModel::LocalDynamic => "local-dynamic",
TlsModel::InitialExec => "initial-exec",
TlsModel::LocalExec => "local-exec",
}
.to_json()
}
}
/// Everything is flattened to a single enum to make the json encoding/decoding less annoying.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub enum LinkOutputKind {
/// Dynamically linked non position-independent executable.
DynamicNoPicExe,
/// Dynamically linked position-independent executable.
DynamicPicExe,
/// Statically linked non position-independent executable.
StaticNoPicExe,
/// Statically linked position-independent executable.
StaticPicExe,
/// Regular dynamic library ("dynamically linked").
DynamicDylib,
/// Dynamic library with bundled libc ("statically linked").
StaticDylib,
/// WASI module with a lifetime past the _initialize entry point
WasiReactorExe,
}
impl LinkOutputKind {
fn as_str(&self) -> &'static str {
match self {
LinkOutputKind::DynamicNoPicExe => "dynamic-nopic-exe",
LinkOutputKind::DynamicPicExe => "dynamic-pic-exe",
LinkOutputKind::StaticNoPicExe => "static-nopic-exe",
LinkOutputKind::StaticPicExe => "static-pic-exe",
LinkOutputKind::DynamicDylib => "dynamic-dylib",
LinkOutputKind::StaticDylib => "static-dylib",
LinkOutputKind::WasiReactorExe => "wasi-reactor-exe",
}
}
pub(super) fn from_str(s: &str) -> Option<LinkOutputKind> {
Some(match s {
"dynamic-nopic-exe" => LinkOutputKind::DynamicNoPicExe,
"dynamic-pic-exe" => LinkOutputKind::DynamicPicExe,
"static-nopic-exe" => LinkOutputKind::StaticNoPicExe,
"static-pic-exe" => LinkOutputKind::StaticPicExe,
"dynamic-dylib" => LinkOutputKind::DynamicDylib,
"static-dylib" => LinkOutputKind::StaticDylib,
"wasi-reactor-exe" => LinkOutputKind::WasiReactorExe,
_ => return None,
})
}
}
impl fmt::Display for LinkOutputKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(self.as_str())
}
}
pub type LinkArgs = BTreeMap<LinkerFlavor, Vec<String>>;
#[derive(Clone, Copy, Hash, Debug, PartialEq, Eq)]
pub enum SplitDebuginfo {
/// Split debug-information is disabled, meaning that on supported platforms
/// you can find all debug information in the executable itself. This is
/// only supported for ELF effectively.
///
/// * Windows - not supported
/// * macOS - don't run `dsymutil`
/// * ELF - `.dwarf_*` sections
Off,
/// Split debug-information can be found in a "packed" location separate
/// from the final artifact. This is supported on all platforms.
///
/// * Windows - `*.pdb`
/// * macOS - `*.dSYM` (run `dsymutil`)
/// * ELF - `*.dwp` (run `rust-llvm-dwp`)
Packed,
/// Split debug-information can be found in individual object files on the
/// filesystem. The main executable may point to the object files.
///
/// * Windows - not supported
/// * macOS - supported, scattered object files
/// * ELF - supported, scattered `*.dwo` files
Unpacked,
}
impl SplitDebuginfo {
fn as_str(&self) -> &'static str {
match self {
SplitDebuginfo::Off => "off",
SplitDebuginfo::Packed => "packed",
SplitDebuginfo::Unpacked => "unpacked",
}
}
}
impl FromStr for SplitDebuginfo {
type Err = ();
fn from_str(s: &str) -> Result<SplitDebuginfo, ()> {
Ok(match s {
"off" => SplitDebuginfo::Off,
"unpacked" => SplitDebuginfo::Unpacked,
"packed" => SplitDebuginfo::Packed,
_ => return Err(()),
})
}
}
impl ToJson for SplitDebuginfo {
fn to_json(&self) -> Json {
self.as_str().to_json()
}
}
impl fmt::Display for SplitDebuginfo {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(self.as_str())
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum StackProbeType {
/// Don't emit any stack probes.
None,
/// It is harmless to use this option even on targets that do not have backend support for
/// stack probes as the failure mode is the same as if no stack-probe option was specified in
/// the first place.
Inline,
/// Call `__rust_probestack` whenever stack needs to be probed.
Call,
/// Use inline option for LLVM versions later than specified in `min_llvm_version_for_inline`
/// and call `__rust_probestack` otherwise.
InlineOrCall { min_llvm_version_for_inline: (u32, u32, u32) },
}
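// JSON shapes accepted by `from_json` below (illustrative sketch):
//   {"kind": "none"} | {"kind": "inline"} | {"kind": "call"}
//   {"kind": "inline-or-call", "min-llvm-version-for-inline": [11, 0, 0]}
// Missing entries of the version triple default to 11, 0 and 0 respectively.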
impl StackProbeType {
fn from_json(json: &Json) -> Result<Self, String> {
let object = json.as_object().ok_or_else(|| "expected a JSON object")?;
let kind = object
.get("kind")
.and_then(|o| o.as_string())
.ok_or_else(|| "expected `kind` to be a string")?;
match kind {
"none" => Ok(StackProbeType::None),
"inline" => Ok(StackProbeType::Inline),
"call" => Ok(StackProbeType::Call),
"inline-or-call" => {
let min_version = object
.get("min-llvm-version-for-inline")
.and_then(|o| o.as_array())
.ok_or_else(|| "expected `min-llvm-version-for-inline` to be an array")?;
let mut iter = min_version.into_iter().map(|v| {
let int = v.as_u64().ok_or_else(
|| "expected `min-llvm-version-for-inline` values to be integers",
)?;
u32::try_from(int)
.map_err(|_| "`min-llvm-version-for-inline` values don't convert to u32")
});
let min_llvm_version_for_inline = (
iter.next().unwrap_or(Ok(11))?,
iter.next().unwrap_or(Ok(0))?,
iter.next().unwrap_or(Ok(0))?,
);
Ok(StackProbeType::InlineOrCall { min_llvm_version_for_inline })
}
_ => Err(String::from(
"`kind` expected to be one of `none`, `inline`, `call` or `inline-or-call`",
)),
}
}
}
impl ToJson for StackProbeType {
fn to_json(&self) -> Json {
Json::Object(match self {
StackProbeType::None => {
vec![(String::from("kind"), "none".to_json())].into_iter().collect()
}
StackProbeType::Inline => {
vec![(String::from("kind"), "inline".to_json())].into_iter().collect()
}
StackProbeType::Call => {
vec![(String::from("kind"), "call".to_json())].into_iter().collect()
}
StackProbeType::InlineOrCall { min_llvm_version_for_inline } => vec![
(String::from("kind"), "inline-or-call".to_json()),
(
String::from("min-llvm-version-for-inline"),
min_llvm_version_for_inline.to_json(),
),
]
.into_iter()
.collect(),
})
}
}
bitflags::bitflags! {
#[derive(Default, Encodable, Decodable)]
pub struct SanitizerSet: u8 {
const ADDRESS = 1 << 0;
const LEAK = 1 << 1;
const MEMORY = 1 << 2;
const THREAD = 1 << 3;
const HWADDRESS = 1 << 4;
}
}
impl SanitizerSet {
/// Return sanitizer's name
///
/// Returns none if the flags is a set of sanitizers numbering not exactly one.
fn as_str(self) -> Option<&'static str> {
Some(match self {
SanitizerSet::ADDRESS => "address",
SanitizerSet::LEAK => "leak",
SanitizerSet::MEMORY => "memory",
SanitizerSet::THREAD => "thread",
SanitizerSet::HWADDRESS => "hwaddress",
_ => return None,
})
}
}
/// Formats a sanitizer set as a comma separated list of sanitizers' names.
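/// (For example, `SanitizerSet::ADDRESS | SanitizerSet::LEAK` renders as
/// `address, leak`.)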
impl fmt::Display for SanitizerSet {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut first = true;
for s in *self {
let name = s.as_str().unwrap_or_else(|| panic!("unrecognized sanitizer {:?}", s));
if !first {
f.write_str(", ")?;
}
f.write_str(name)?;
first = false;
}
Ok(())
}
}
impl IntoIterator for SanitizerSet {
type Item = SanitizerSet;
type IntoIter = std::vec::IntoIter<SanitizerSet>;
fn into_iter(self) -> Self::IntoIter {
[
SanitizerSet::ADDRESS,
SanitizerSet::LEAK,
SanitizerSet::MEMORY,
SanitizerSet::THREAD,
SanitizerSet::HWADDRESS,
]
.iter()
.copied()
.filter(|&s| self.contains(s))
.collect::<Vec<_>>()
.into_iter()
}
}
impl<CTX> HashStable<CTX> for SanitizerSet {
fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
self.bits().hash_stable(ctx, hasher);
}
}
impl ToJson for SanitizerSet {
fn to_json(&self) -> Json {
self.into_iter()
.map(|v| Some(v.as_str()?.to_json()))
.collect::<Option<Vec<_>>>()
.unwrap_or_default()
.to_json()
}
}
#[derive(Clone, Copy, PartialEq, Hash, Debug)]
pub enum FramePointer {
/// Forces the machine code generator to always preserve the frame pointers.
Always,
/// Forces the machine code generator to preserve the frame pointers except for the leaf
/// functions (i.e. those that don't call other functions).
NonLeaf,
/// Allows the machine code generator to omit the frame pointers.
///
/// This option does not guarantee that the frame pointers will be omitted.
MayOmit,
}
impl FromStr for FramePointer {
type Err = ();
fn from_str(s: &str) -> Result<Self, ()> {
Ok(match s {
"always" => Self::Always,
"non-leaf" => Self::NonLeaf,
"may-omit" => Self::MayOmit,
_ => return Err(()),
})
}
}
impl ToJson for FramePointer {
fn to_json(&self) -> Json {
match *self {
Self::Always => "always",
Self::NonLeaf => "non-leaf",
Self::MayOmit => "may-omit",
}
.to_json()
}
}
macro_rules! supported_targets {
( $(($( $triple:literal, )+ $module:ident ),)+ ) => {
$(mod $module;)+
/// List of supported targets
pub const TARGETS: &[&str] = &[$($($triple),+),+];
fn load_builtin(target: &str) -> Option<Target> {
let mut t = match target {
$( $($triple)|+ => $module::target(), )+
_ => return None,
};
t.is_builtin = true;
debug!("got builtin target: {:?}", t);
Some(t)
}
#[cfg(test)]
mod tests {
mod tests_impl;
// Cannot put this into a separate file without duplication, make an exception.
$(
#[test] // `#[test]`
fn $module() {
tests_impl::test_target(super::$module::target());
}
)+
}
};
}
supported_targets! {
("x86_64-unknown-linux-gnu", x86_64_unknown_linux_gnu),
("x86_64-unknown-linux-gnux32", x86_64_unknown_linux_gnux32),
("i686-unknown-linux-gnu", i686_unknown_linux_gnu),
("i586-unknown-linux-gnu", i586_unknown_linux_gnu),
("mips-unknown-linux-gnu", mips_unknown_linux_gnu),
("mips64-unknown-linux-gnuabi64", mips64_unknown_linux_gnuabi64),
("mips64el-unknown-linux-gnuabi64", mips64el_unknown_linux_gnuabi64),
("mipsisa32r6-unknown-linux-gnu", mipsisa32r6_unknown_linux_gnu),
("mipsisa32r6el-unknown-linux-gnu", mipsisa32r6el_unknown_linux_gnu),
("mipsisa64r6-unknown-linux-gnuabi64", mipsisa64r6_unknown_linux_gnuabi64),
("mipsisa64r6el-unknown-linux-gnuabi64", mipsisa64r6el_unknown_linux_gnuabi64),
("mipsel-unknown-linux-gnu", mipsel_unknown_linux_gnu),
("powerpc-unknown-linux-gnu", powerpc_unknown_linux_gnu),
("powerpc-unknown-linux-gnuspe", powerpc_unknown_linux_gnuspe),
("powerpc-unknown-linux-musl", powerpc_unknown_linux_musl),
("powerpc64-unknown-linux-gnu", powerpc64_unknown_linux_gnu),
("powerpc64-unknown-linux-musl", powerpc64_unknown_linux_musl),
("powerpc64le-unknown-linux-gnu", powerpc64le_unknown_linux_gnu),
("powerpc64le-unknown-linux-musl", powerpc64le_unknown_linux_musl),
("s390x-unknown-linux-gnu", s390x_unknown_linux_gnu),
("s390x-unknown-linux-musl", s390x_unknown_linux_musl),
("sparc-unknown-linux-gnu", sparc_unknown_linux_gnu),
("sparc64-unknown-linux-gnu", sparc64_unknown_linux_gnu),
("arm-unknown-linux-gnueabi", arm_unknown_linux_gnueabi),
("arm-unknown-linux-gnueabihf", arm_unknown_linux_gnueabihf),
("arm-unknown-linux-musleabi", arm_unknown_linux_musleabi),
("arm-unknown-linux-musleabihf", arm_unknown_linux_musleabihf),
("armv4t-unknown-linux-gnueabi", armv4t_unknown_linux_gnueabi),
("armv5te-unknown-linux-gnueabi", armv5te_unknown_linux_gnueabi),
("armv5te-unknown-linux-musleabi", armv5te_unknown_linux_musleabi),
("armv5te-unknown-linux-uclibceabi", armv5te_unknown_linux_uclibceabi),
("armv7-unknown-linux-gnueabi", armv7_unknown_linux_gnueabi),
("armv7-unknown-linux-gnueabihf", armv7_unknown_linux_gnueabihf),
("thumbv7neon-unknown-linux-gnueabihf", thumbv7neon_unknown_linux_gnueabihf),
("thumbv7neon-unknown-linux-musleabihf", thumbv7neon_unknown_linux_musleabihf),
("armv7-unknown-linux-musleabi", armv7_unknown_linux_musleabi),
("armv7-unknown-linux-musleabihf", armv7_unknown_linux_musleabihf),
("aarch64-unknown-linux-gnu", aarch64_unknown_linux_gnu),
("aarch64-unknown-linux-musl", aarch64_unknown_linux_musl),
("x86_64-unknown-linux-musl", x86_64_unknown_linux_musl),
("i686-unknown-linux-musl", i686_unknown_linux_musl),
("i586-unknown-linux-musl", i586_unknown_linux_musl),
("mips-unknown-linux-musl", mips_unknown_linux_musl),
("mipsel-unknown-linux-musl", mipsel_unknown_linux_musl),
("mips64-unknown-linux-muslabi64", mips64_unknown_linux_muslabi64),
("mips64el-unknown-linux-muslabi64", mips64el_unknown_linux_muslabi64),
("hexagon-unknown-linux-musl", hexagon_unknown_linux_musl),
("mips-unknown-linux-uclibc", mips_unknown_linux_uclibc),
("mipsel-unknown-linux-uclibc", mipsel_unknown_linux_uclibc),
("i686-linux-android", i686_linux_android),
("x86_64-linux-android", x86_64_linux_android),
("arm-linux-androideabi", arm_linux_androideabi),
("armv7-linux-androideabi", armv7_linux_androideabi),
("thumbv7neon-linux-androideabi", thumbv7neon_linux_androideabi),
("aarch64-linux-android", aarch64_linux_android),
("x86_64-unknown-none-linuxkernel", x86_64_unknown_none_linuxkernel),
("aarch64-unknown-freebsd", aarch64_unknown_freebsd),
("armv6-unknown-freebsd", armv6_unknown_freebsd),
("armv7-unknown-freebsd", armv7_unknown_freebsd),
("i686-unknown-freebsd", i686_unknown_freebsd),
("powerpc-unknown-freebsd", powerpc_unknown_freebsd),
("powerpc64-unknown-freebsd", powerpc64_unknown_freebsd),
("powerpc64le-unknown-freebsd", powerpc64le_unknown_freebsd),
("x86_64-unknown-freebsd", x86_64_unknown_freebsd),
("x86_64-unknown-dragonfly", x86_64_unknown_dragonfly),
("aarch64-unknown-openbsd", aarch64_unknown_openbsd),
("i686-unknown-openbsd", i686_unknown_openbsd),
("sparc64-unknown-openbsd", sparc64_unknown_openbsd),
("x86_64-unknown-openbsd", x86_64_unknown_openbsd),
("powerpc-unknown-openbsd", powerpc_unknown_openbsd),
("aarch64-unknown-netbsd", aarch64_unknown_netbsd),
("armv6-unknown-netbsd-eabihf", armv6_unknown_netbsd_eabihf),
("armv7-unknown-netbsd-eabihf", armv7_unknown_netbsd_eabihf),
("i686-unknown-netbsd", i686_unknown_netbsd),
("powerpc-unknown-netbsd", powerpc_unknown_netbsd),
("sparc64-unknown-netbsd", sparc64_unknown_netbsd),
("x86_64-unknown-netbsd", x86_64_unknown_netbsd),
("i686-unknown-haiku", i686_unknown_haiku),
("x86_64-unknown-haiku", x86_64_unknown_haiku),
("aarch64-apple-darwin", aarch64_apple_darwin),
("x86_64-apple-darwin", x86_64_apple_darwin),
("i686-apple-darwin", i686_apple_darwin),
("aarch64-fuchsia", aarch64_fuchsia),
("x86_64-fuchsia", x86_64_fuchsia),
("avr-unknown-gnu-atmega328", avr_unknown_gnu_atmega328),
("x86_64-unknown-l4re-uclibc", x86_64_unknown_l4re_uclibc),
("aarch64-unknown-redox", aarch64_unknown_redox),
("x86_64-unknown-redox", x86_64_unknown_redox),
("i386-apple-ios", i386_apple_ios),
("x86_64-apple-ios", x86_64_apple_ios),
("aarch64-apple-ios", aarch64_apple_ios),
("armv7-apple-ios", armv7_apple_ios),
("armv7s-apple-ios", armv7s_apple_ios),
("x86_64-apple-ios-macabi", x86_64_apple_ios_macabi),
("aarch64-apple-ios-macabi", aarch64_apple_ios_macabi),
("aarch64-apple-ios-sim", aarch64_apple_ios_sim),
("aarch64-apple-tvos", aarch64_apple_tvos),
("x86_64-apple-tvos", x86_64_apple_tvos),
("armebv7r-none-eabi", armebv7r_none_eabi),
("armebv7r-none-eabihf", armebv7r_none_eabihf),
("armv7r-none-eabi", armv7r_none_eabi),
("armv7r-none-eabihf", armv7r_none_eabihf),
("x86_64-pc-solaris", x86_64_pc_solaris),
("x86_64-sun-solaris", x86_64_sun_solaris),
("sparcv9-sun-solaris", sparcv9_sun_solaris),
("x86_64-unknown-illumos", x86_64_unknown_illumos),
("x86_64-pc-windows-gnu", x86_64_pc_windows_gnu),
("i686-pc-windows-gnu", i686_pc_windows_gnu),
("i686-uwp-windows-gnu", i686_uwp_windows_gnu),
("x86_64-uwp-windows-gnu", x86_64_uwp_windows_gnu),
("aarch64-pc-windows-msvc", aarch64_pc_windows_msvc),
("aarch64-uwp-windows-msvc", aarch64_uwp_windows_msvc),
("x86_64-pc-windows-msvc", x86_64_pc_windows_msvc),
("x86_64-uwp-windows-msvc", x86_64_uwp_windows_msvc),
("i686-pc-windows-msvc", i686_pc_windows_msvc),
("i686-uwp-windows-msvc", i686_uwp_windows_msvc),
("i586-pc-windows-msvc", i586_pc_windows_msvc),
("thumbv7a-pc-windows-msvc", thumbv7a_pc_windows_msvc),
("thumbv7a-uwp-windows-msvc", thumbv7a_uwp_windows_msvc),
("asmjs-unknown-emscripten", asmjs_unknown_emscripten),
("wasm32-unknown-emscripten", wasm32_unknown_emscripten),
("wasm32-unknown-unknown", wasm32_unknown_unknown),
("wasm32-wasi", wasm32_wasi),
("wasm64-unknown-unknown", wasm64_unknown_unknown),
("thumbv6m-none-eabi", thumbv6m_none_eabi),
("thumbv7m-none-eabi", thumbv7m_none_eabi),
("thumbv7em-none-eabi", thumbv7em_none_eabi),
("thumbv7em-none-eabihf", thumbv7em_none_eabihf),
("thumbv8m.base-none-eabi", thumbv8m_base_none_eabi),
("thumbv8m.main-none-eabi", thumbv8m_main_none_eabi),
("thumbv8m.main-none-eabihf", thumbv8m_main_none_eabihf),
("armv7a-none-eabi", armv7a_none_eabi),
("armv7a-none-eabihf", armv7a_none_eabihf),
("msp430-none-elf", msp430_none_elf),
("aarch64-unknown-hermit", aarch64_unknown_hermit),
("x86_64-unknown-hermit", x86_64_unknown_hermit),
("x86_64-unknown-none-hermitkernel", x86_64_unknown_none_hermitkernel),
("riscv32i-unknown-none-elf", riscv32i_unknown_none_elf),
("riscv32imc-unknown-none-elf", riscv32imc_unknown_none_elf),
("riscv32imc-esp-espidf", riscv32imc_esp_espidf),
("riscv32imac-unknown-none-elf", riscv32imac_unknown_none_elf),
("riscv32gc-unknown-linux-gnu", riscv32gc_unknown_linux_gnu),
("riscv32gc-unknown-linux-musl", riscv32gc_unknown_linux_musl),
("riscv64imac-unknown-none-elf", riscv64imac_unknown_none_elf),
("riscv64gc-unknown-none-elf", riscv64gc_unknown_none_elf),
("riscv64gc-unknown-linux-gnu", riscv64gc_unknown_linux_gnu),
("riscv64gc-unknown-linux-musl", riscv64gc_unknown_linux_musl),
("aarch64-unknown-none", aarch64_unknown_none),
("aarch64-unknown-none-softfloat", aarch64_unknown_none_softfloat),
("x86_64-fortanix-unknown-sgx", x86_64_fortanix_unknown_sgx),
("x86_64-unknown-uefi", x86_64_unknown_uefi),
("i686-unknown-uefi", i686_unknown_uefi),
("aarch64-unknown-uefi", aarch64_unknown_uefi),
("nvptx64-nvidia-cuda", nvptx64_nvidia_cuda),
("i686-wrs-vxworks", i686_wrs_vxworks),
("x86_64-wrs-vxworks", x86_64_wrs_vxworks),
("armv7-wrs-vxworks-eabihf", armv7_wrs_vxworks_eabihf),
("aarch64-wrs-vxworks", aarch64_wrs_vxworks),
("powerpc-wrs-vxworks", powerpc_wrs_vxworks),
("powerpc-wrs-vxworks-spe", powerpc_wrs_vxworks_spe),
("powerpc64-wrs-vxworks", powerpc64_wrs_vxworks),
("mipsel-sony-psp", mipsel_sony_psp),
("mipsel-unknown-none", mipsel_unknown_none),
("thumbv4t-none-eabi", thumbv4t_none_eabi),
("aarch64_be-unknown-linux-gnu", aarch64_be_unknown_linux_gnu),
("aarch64-unknown-linux-gnu_ilp32", aarch64_unknown_linux_gnu_ilp32),
("aarch64_be-unknown-linux-gnu_ilp32", aarch64_be_unknown_linux_gnu_ilp32),
("bpfeb-unknown-none", bpfeb_unknown_none),
("bpfel-unknown-none", bpfel_unknown_none),
}
/// Warnings encountered when parsing the target `json`.
///
/// Includes fields that weren't recognized and fields that don't have the expected type.
#[derive(Debug, PartialEq)]
pub struct TargetWarnings {
unused_fields: Vec<String>,
incorrect_type: Vec<String>,
}
impl TargetWarnings {
pub fn empty() -> Self {
Self { unused_fields: Vec::new(), incorrect_type: Vec::new() }
}
pub fn warning_messages(&self) -> Vec<String> {
let mut warnings = vec![];
if !self.unused_fields.is_empty() {
warnings.push(format!(
"target json file contains unused fields: {}",
self.unused_fields.join(", ")
));
}
if !self.incorrect_type.is_empty() {
warnings.push(format!(
"target json file contains fields whose value doesn't have the correct json type: {}",
self.incorrect_type.join(", ")
));
}
warnings
}
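    // E.g. a custom target JSON containing an unrecognized `foo` key would
    // surface here as: "target json file contains unused fields: foo".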
}
/// Everything `rustc` knows about how to compile for a specific target.
///
/// Every field here must be specified, and has no default value.
#[derive(PartialEq, Clone, Debug)]
pub struct Target {
/// Target triple to pass to LLVM.
pub llvm_target: String,
/// Number of bits in a pointer. Influences the `target_pointer_width` `cfg` variable.
pub pointer_width: u32,
/// Architecture to use for ABI considerations. Valid options include: "x86",
/// "x86_64", "arm", "aarch64", "mips", "powerpc", "powerpc64", and others.
pub arch: String,
/// [Data layout](https://llvm.org/docs/LangRef.html#data-layout) to pass to LLVM.
pub data_layout: String,
/// Optional settings with defaults.
pub options: TargetOptions,
}
pub trait HasTargetSpec {
fn target_spec(&self) -> &Target;
}
impl HasTargetSpec for Target {
#[inline]
fn target_spec(&self) -> &Target {
self
}
}
/// Optional aspects of a target specification.
///
/// This has an implementation of `Default`, see each field for what the default is. In general,
/// these try to take "minimal defaults" that don't assume anything about the runtime they run in.
///
/// `TargetOptions` as a separate structure is mostly an implementation detail of `Target`
/// construction, all its fields logically belong to `Target` and available from `Target`
/// through `Deref` impls.
#[derive(PartialEq, Clone, Debug)]
pub struct TargetOptions {
/// Whether the target is built-in or loaded from a custom target specification.
pub is_builtin: bool,
/// Used as the `target_endian` `cfg` variable. Defaults to little endian.
pub endian: Endian,
/// Width of c_int type. Defaults to "32".
pub c_int_width: String,
/// OS name to use for conditional compilation (`target_os`). Defaults to "none".
/// "none" implies a bare metal target without `std` library.
    /// A couple of targets that have `std` also use "unknown" as an `os` value,
/// but they are exceptions.
pub os: String,
/// Environment name to use for conditional compilation (`target_env`). Defaults to "".
pub env: String,
/// ABI name to distinguish multiple ABIs on the same OS and architecture. For instance, `"eabi"`
/// or `"eabihf"`. Defaults to "".
pub abi: String,
/// Vendor name to use for conditional compilation (`target_vendor`). Defaults to "unknown".
pub vendor: String,
/// Default linker flavor used if `-C linker-flavor` or `-C linker` are not passed
/// on the command line. Defaults to `LinkerFlavor::Gcc`.
pub linker_flavor: LinkerFlavor,
/// Linker to invoke
pub linker: Option<String>,
/// LLD flavor used if `lld` (or `rust-lld`) is specified as a linker
/// without clarifying its flavor in any way.
pub lld_flavor: LldFlavor,
/// Linker arguments that are passed *before* any user-defined libraries.
pub pre_link_args: LinkArgs,
/// Objects to link before and after all other object code.
pub pre_link_objects: CrtObjects,
pub post_link_objects: CrtObjects,
/// Same as `(pre|post)_link_objects`, but when we fail to pull the objects with help of the
/// target's native gcc and fall back to the "self-contained" mode and pull them manually.
/// See `crt_objects.rs` for some more detailed documentation.
pub pre_link_objects_fallback: CrtObjects,
pub post_link_objects_fallback: CrtObjects,
/// Which logic to use to determine whether to fall back to the "self-contained" mode or not.
pub crt_objects_fallback: Option<CrtObjectsFallback>,
/// Linker arguments that are unconditionally passed after any
/// user-defined but before post-link objects. Standard platform
    /// libraries that should always be linked to usually go here.
pub late_link_args: LinkArgs,
/// Linker arguments used in addition to `late_link_args` if at least one
/// Rust dependency is dynamically linked.
pub late_link_args_dynamic: LinkArgs,
    /// Linker arguments used in addition to `late_link_args` if all Rust
/// dependencies are statically linked.
pub late_link_args_static: LinkArgs,
/// Linker arguments that are unconditionally passed *after* any
/// user-defined libraries.
pub post_link_args: LinkArgs,
/// Optional link script applied to `dylib` and `executable` crate types.
/// This is a string containing the script, not a path. Can only be applied
/// to linkers where `linker_is_gnu` is true.
pub link_script: Option<String>,
/// Environment variables to be set for the linker invocation.
pub link_env: Vec<(String, String)>,
/// Environment variables to be removed for the linker invocation.
pub link_env_remove: Vec<String>,
/// Extra arguments to pass to the external assembler (when used)
pub asm_args: Vec<String>,
/// Default CPU to pass to LLVM. Corresponds to `llc -mcpu=$cpu`. Defaults
/// to "generic".
pub cpu: String,
/// Default target features to pass to LLVM. These features will *always* be
/// passed, and cannot be disabled even via `-C`. Corresponds to `llc
/// -mattr=$features`.
pub features: String,
/// Whether dynamic linking is available on this target. Defaults to false.
pub dynamic_linking: bool,
/// If dynamic linking is available, whether only cdylibs are supported.
pub only_cdylib: bool,
/// Whether executables are available on this target. iOS, for example, only allows static
/// libraries. Defaults to false.
pub executables: bool,
/// Relocation model to use in object file. Corresponds to `llc
/// -relocation-model=$relocation_model`. Defaults to `Pic`.
pub relocation_model: RelocModel,
/// Code model to use. Corresponds to `llc -code-model=$code_model`.
/// Defaults to `None` which means "inherited from the base LLVM target".
pub code_model: Option<CodeModel>,
/// TLS model to use. Options are "global-dynamic" (default), "local-dynamic", "initial-exec"
/// and "local-exec". This is similar to the -ftls-model option in GCC/Clang.
pub tls_model: TlsModel,
/// Do not emit code that uses the "red zone", if the ABI has one. Defaults to false.
pub disable_redzone: bool,
/// Frame pointer mode for this target. Defaults to `MayOmit`.
pub frame_pointer: FramePointer,
/// Emit each function in its own section. Defaults to true.
pub function_sections: bool,
/// String to prepend to the name of every dynamic library. Defaults to "lib".
pub dll_prefix: String,
/// String to append to the name of every dynamic library. Defaults to ".so".
pub dll_suffix: String,
/// String to append to the name of every executable.
pub exe_suffix: String,
/// String to prepend to the name of every static library. Defaults to "lib".
pub staticlib_prefix: String,
/// String to append to the name of every static library. Defaults to ".a".
pub staticlib_suffix: String,
/// Values of the `target_family` cfg set for this target.
///
/// Common options are: "unix", "windows". Defaults to no families.
///
/// See <https://doc.rust-lang.org/reference/conditional-compilation.html#target_family>.
pub families: Vec<String>,
/// Whether the target toolchain's ABI supports returning small structs as an integer.
pub abi_return_struct_as_int: bool,
/// Whether the target toolchain is like macOS's. Only useful for compiling against iOS/macOS,
/// in particular running dsymutil and some other stuff like `-dead_strip`. Defaults to false.
pub is_like_osx: bool,
/// Whether the target toolchain is like Solaris's.
/// Only useful for compiling against Illumos/Solaris,
/// as they have a different set of linker flags. Defaults to false.
pub is_like_solaris: bool,
/// Whether the target is like Windows.
/// This is a combination of several more specific properties represented as a single flag:
/// - The target uses a Windows ABI,
/// - uses PE/COFF as a format for object code,
/// - uses Windows-style dllexport/dllimport for shared libraries,
/// - uses import libraries and .def files for symbol exports,
/// - executables support setting a subsystem.
pub is_like_windows: bool,
/// Whether the target is like MSVC.
/// This is a combination of several more specific properties represented as a single flag:
/// - The target has all the properties from `is_like_windows`
/// (for in-tree targets "is_like_msvc ⇒ is_like_windows" is ensured by a unit test),
/// - has some MSVC-specific Windows ABI properties,
/// - uses a link.exe-like linker,
/// - uses CodeView/PDB for debuginfo and natvis for its visualization,
/// - uses SEH-based unwinding,
/// - supports control flow guard mechanism.
pub is_like_msvc: bool,
/// Whether the target toolchain is like Emscripten's. Only useful for compiling with
/// Emscripten toolchain.
/// Defaults to false.
pub is_like_emscripten: bool,
/// Whether the target toolchain is like Fuchsia's.
pub is_like_fuchsia: bool,
/// Whether a target toolchain is like WASM.
pub is_like_wasm: bool,
/// Version of DWARF to use if not using the default.
/// Useful because some platforms (osx, bsd) only want up to DWARF2.
pub dwarf_version: Option<u32>,
    /// Whether the linker supports GNU-like arguments such as -O. Defaults to true.
pub linker_is_gnu: bool,
/// The MinGW toolchain has a known issue that prevents it from correctly
/// handling COFF object files with more than 2<sup>15</sup> sections. Since each weak
/// symbol needs its own COMDAT section, weak linkage implies a large
/// number sections that easily exceeds the given limit for larger
/// codebases. Consequently we want a way to disallow weak linkage on some
/// platforms.
pub allows_weak_linkage: bool,
    /// Whether the linker supports rpaths or not. Defaults to false.
pub has_rpath: bool,
/// Whether to disable linking to the default libraries, typically corresponds
/// to `-nodefaultlibs`. Defaults to true.
pub no_default_libraries: bool,
/// Dynamically linked executables can be compiled as position independent
/// if the default relocation model of position independent code is not
/// changed. This is a requirement to take advantage of ASLR, as otherwise
/// the functions in the executable are not randomized and can be used
/// during an exploit of a vulnerability in any code.
pub position_independent_executables: bool,
/// Executables that are both statically linked and position-independent are supported.
pub static_position_independent_executables: bool,
/// Determines if the target always requires using the PLT for indirect
/// library calls or not. This controls the default value of the `-Z plt` flag.
pub needs_plt: bool,
/// Either partial, full, or off. Full RELRO makes the dynamic linker
/// resolve all symbols at startup and marks the GOT read-only before
/// starting the program, preventing overwriting the GOT.
pub relro_level: RelroLevel,
/// Format that archives should be emitted in. This affects whether we use
/// LLVM to assemble an archive or fall back to the system linker, and
    /// currently only "gnu" selects LLVM. Unknown strings cause
/// the system linker to be used.
pub archive_format: String,
/// Is asm!() allowed? Defaults to true.
pub allow_asm: bool,
/// Whether the runtime startup code requires the `main` function be passed
/// `argc` and `argv` values.
pub main_needs_argc_argv: bool,
/// Flag indicating whether ELF TLS (e.g., #[thread_local]) is available for
/// this target.
pub has_elf_tls: bool,
// This is mainly for easy compatibility with emscripten.
// If we give emcc .o files that are actually .bc files it
// will 'just work'.
pub obj_is_bitcode: bool,
/// Whether the target requires that emitted object code includes bitcode.
pub forces_embed_bitcode: bool,
/// Content of the LLVM cmdline section associated with embedded bitcode.
pub bitcode_llvm_cmdline: String,
/// Don't use this field; instead use the `.min_atomic_width()` method.
pub min_atomic_width: Option<u64>,
/// Don't use this field; instead use the `.max_atomic_width()` method.
pub max_atomic_width: Option<u64>,
/// Whether the target supports atomic CAS operations natively
pub atomic_cas: bool,
/// Panic strategy: "unwind" or "abort"
pub panic_strategy: PanicStrategy,
/// Whether or not linking dylibs to a static CRT is allowed.
pub crt_static_allows_dylibs: bool,
/// Whether or not the CRT is statically linked by default.
pub crt_static_default: bool,
/// Whether or not crt-static is respected by the compiler (or is a no-op).
pub crt_static_respected: bool,
/// The implementation of stack probes to use.
pub stack_probes: StackProbeType,
/// The minimum alignment for global symbols.
pub min_global_align: Option<u64>,
/// Default number of codegen units to use in debug mode
pub default_codegen_units: Option<u64>,
/// Whether to generate trap instructions in places where optimization would
/// otherwise produce control flow that falls through into unrelated memory.
pub trap_unreachable: bool,
/// This target requires everything to be compiled with LTO to emit a final
/// executable, aka there is no native linker for this target.
pub requires_lto: bool,
/// This target has no support for threads.
pub singlethread: bool,
/// Whether library functions call lowering/optimization is disabled in LLVM
/// for this target unconditionally.
pub no_builtins: bool,
/// The default visibility for symbols in this target should be "hidden"
/// rather than "default"
pub default_hidden_visibility: bool,
/// Whether a .debug_gdb_scripts section will be added to the output object file
pub emit_debug_gdb_scripts: bool,
    /// Whether or not to unconditionally emit `uwtable` attributes on functions,
/// typically because the platform needs to unwind for things like stack
/// unwinders.
pub requires_uwtable: bool,
/// Whether or not to emit `uwtable` attributes on functions if `-C force-unwind-tables`
/// is not specified and `uwtable` is not required on this target.
pub default_uwtable: bool,
/// Whether or not SIMD types are passed by reference in the Rust ABI,
/// typically required if a target can be compiled with a mixed set of
/// target features. This is `true` by default, and `false` for targets like
/// wasm32 where the whole program either has simd or not.
pub simd_types_indirect: bool,
/// Pass a list of symbol which should be exported in the dylib to the linker.
pub limit_rdylib_exports: bool,
/// If set, have the linker export exactly these symbols, instead of using
/// the usual logic to figure this out from the crate itself.
pub override_export_symbols: Option<Vec<String>>,
/// Determines how or whether the MergeFunctions LLVM pass should run for
/// this target. Either "disabled", "trampolines", or "aliases".
/// The MergeFunctions pass is generally useful, but some targets may need
/// to opt out. The default is "aliases".
///
/// Workaround for: <https://github.com/rust-lang/rust/issues/57356>
pub merge_functions: MergeFunctions,
/// Use platform dependent mcount function
pub mcount: String,
/// LLVM ABI name, corresponds to the '-mabi' parameter available in multilib C compilers
pub llvm_abiname: String,
/// Whether or not RelaxElfRelocation flag will be passed to the linker
pub relax_elf_relocations: bool,
/// Additional arguments to pass to LLVM, similar to the `-C llvm-args` codegen option.
pub llvm_args: Vec<String>,
/// Whether to use legacy .ctors initialization hooks rather than .init_array. Defaults
/// to false (uses .init_array).
pub use_ctors_section: bool,
/// Whether the linker is instructed to add a `GNU_EH_FRAME` ELF header
    /// used to locate unwinding information
/// (only has effect if the linker is `ld`-like).
pub eh_frame_header: bool,
/// Is true if the target is an ARM architecture using thumb v1 which allows for
/// thumb and arm interworking.
pub has_thumb_interworking: bool,
/// How to handle split debug information, if at all. Specifying `None` has
/// target-specific meaning.
pub split_debuginfo: SplitDebuginfo,
/// The sanitizers supported by this target
///
/// Note that the support here is at a codegen level. If the machine code with sanitizer
    /// enabled can be generated on this target, but the necessary supporting libraries are not
/// distributed with the target, the sanitizer should still appear in this list for the target.
pub supported_sanitizers: SanitizerSet,
/// If present it's a default value to use for adjusting the C ABI.
pub default_adjusted_cabi: Option<Abi>,
/// Minimum number of bits in #[repr(C)] enum. Defaults to 32.
pub c_enum_min_bits: u64,
}
impl Default for TargetOptions {
/// Creates a set of "sane defaults" for any target. This is still
/// incomplete, and if used for compilation, will certainly not work.
fn default() -> TargetOptions {
TargetOptions {
is_builtin: false,
endian: Endian::Little,
c_int_width: "32".to_string(),
os: "none".to_string(),
env: String::new(),
abi: String::new(),
vendor: "unknown".to_string(),
linker_flavor: LinkerFlavor::Gcc,
linker: option_env!("CFG_DEFAULT_LINKER").map(|s| s.to_string()),
lld_flavor: LldFlavor::Ld,
pre_link_args: LinkArgs::new(),
post_link_args: LinkArgs::new(),
link_script: None,
asm_args: Vec::new(),
cpu: "generic".to_string(),
features: String::new(),
dynamic_linking: false,
only_cdylib: false,
executables: false,
relocation_model: RelocModel::Pic,
code_model: None,
tls_model: TlsModel::GeneralDynamic,
disable_redzone: false,
frame_pointer: FramePointer::MayOmit,
function_sections: true,
dll_prefix: "lib".to_string(),
dll_suffix: ".so".to_string(),
exe_suffix: String::new(),
staticlib_prefix: "lib".to_string(),
staticlib_suffix: ".a".to_string(),
families: Vec::new(),
abi_return_struct_as_int: false,
is_like_osx: false,
is_like_solaris: false,
is_like_windows: false,
is_like_emscripten: false,
is_like_msvc: false,
is_like_fuchsia: false,
is_like_wasm: false,
dwarf_version: None,
linker_is_gnu: true,
allows_weak_linkage: true,
has_rpath: false,
no_default_libraries: true,
position_independent_executables: false,
static_position_independent_executables: false,
needs_plt: false,
relro_level: RelroLevel::None,
pre_link_objects: Default::default(),
post_link_objects: Default::default(),
pre_link_objects_fallback: Default::default(),
post_link_objects_fallback: Default::default(),
crt_objects_fallback: None,
late_link_args: LinkArgs::new(),
late_link_args_dynamic: LinkArgs::new(),
late_link_args_static: LinkArgs::new(),
link_env: Vec::new(),
link_env_remove: Vec::new(),
archive_format: "gnu".to_string(),
main_needs_argc_argv: true,
allow_asm: true,
has_elf_tls: false,
obj_is_bitcode: false,
forces_embed_bitcode: false,
bitcode_llvm_cmdline: String::new(),
min_atomic_width: None,
max_atomic_width: None,
atomic_cas: true,
panic_strategy: PanicStrategy::Unwind,
crt_static_allows_dylibs: false,
crt_static_default: false,
crt_static_respected: false,
stack_probes: StackProbeType::None,
min_global_align: None,
default_codegen_units: None,
trap_unreachable: true,
requires_lto: false,
singlethread: false,
no_builtins: false,
default_hidden_visibility: false,
emit_debug_gdb_scripts: true,
requires_uwtable: false,
default_uwtable: false,
simd_types_indirect: true,
limit_rdylib_exports: true,
override_export_symbols: None,
merge_functions: MergeFunctions::Aliases,
mcount: "mcount".to_string(),
llvm_abiname: "".to_string(),
relax_elf_relocations: false,
llvm_args: vec![],
use_ctors_section: false,
eh_frame_header: true,
has_thumb_interworking: false,
split_debuginfo: SplitDebuginfo::Off,
supported_sanitizers: SanitizerSet::empty(),
default_adjusted_cabi: None,
c_enum_min_bits: 32,
}
}
}
/// `TargetOptions` being a separate type is basically an implementation detail of `Target` that is
/// used for providing defaults. Perhaps there's a way to merge `TargetOptions` into `Target` so
/// this `Deref` implementation is no longer necessary.
impl Deref for Target {
type Target = TargetOptions;
fn deref(&self) -> &Self::Target {
&self.options
}
}
impl DerefMut for Target {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.options
}
}
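// Thanks to the `Deref`/`DerefMut` impls above, option fields read and
// write directly through `Target` (illustrative):
// let mut t: Target = ...;
// t.cpu = "native".to_string(); // actually stores into `t.options.cpu`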
impl Target {
/// Given a function ABI, turn it into the correct ABI for this target.
pub fn adjust_abi(&self, abi: Abi) -> Abi {
match abi {
Abi::C { .. } => self.default_adjusted_cabi.unwrap_or(abi),
Abi::System { unwind } if self.is_like_windows && self.arch == "x86" => {
Abi::Stdcall { unwind }
}
Abi::System { unwind } => Abi::C { unwind },
Abi::EfiApi if self.arch == "x86_64" => Abi::Win64,
Abi::EfiApi => Abi::C { unwind: false },
// See commentary in `is_abi_supported`.
Abi::Stdcall { .. } | Abi::Thiscall { .. } if self.arch == "x86" => abi,
Abi::Stdcall { unwind } | Abi::Thiscall { unwind } => Abi::C { unwind },
Abi::Fastcall if self.arch == "x86" => abi,
Abi::Vectorcall if ["x86", "x86_64"].contains(&&self.arch[..]) => abi,
Abi::Fastcall | Abi::Vectorcall => Abi::C { unwind: false },
abi => abi,
}
}
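    // Sketch of the adjustment above (`win_x86` is a hypothetical target
    // value): on an x86 Windows target, `extern "system"` lowers to
    // stdcall, while elsewhere it lowers to the plain C ABI:
    // assert_eq!(win_x86.adjust_abi(Abi::System { unwind: false }),
    //            Abi::Stdcall { unwind: false });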
    /// Returns `None` if the UNSUPPORTED_CALLING_CONVENTIONS lint should be emitted.
pub fn is_abi_supported(&self, abi: Abi) -> Option<bool> {
use Abi::*;
Some(match abi {
Rust
| C { .. }
| System { .. }
| RustIntrinsic
| RustCall
| PlatformIntrinsic
| Unadjusted
| Cdecl
| EfiApi => true,
X86Interrupt => ["x86", "x86_64"].contains(&&self.arch[..]),
Aapcs => "arm" == self.arch,
CCmseNonSecureCall => ["arm", "aarch64"].contains(&&self.arch[..]),
Win64 | SysV64 => self.arch == "x86_64",
PtxKernel => self.arch == "nvptx64",
Msp430Interrupt => self.arch == "msp430",
AmdGpuKernel => self.arch == "amdgcn",
AvrInterrupt | AvrNonBlockingInterrupt => self.arch == "avr",
Wasm => ["wasm32", "wasm64"].contains(&&self.arch[..]),
// On windows these fall-back to platform native calling convention (C) when the
// architecture is not supported.
//
// This is I believe a historical accident that has occurred as part of Microsoft
// striving to allow most of the code to "just" compile when support for 64-bit x86
// was added and then later again, when support for ARM architectures was added.
//
// This is well documented across MSDN. Support for this in Rust has been added in
// #54576. This makes much more sense in context of Microsoft's C++ than it does in
// Rust, but there isn't much leeway remaining here to change it back at the time this
// comment has been written.
//
// Following are the relevant excerpts from the MSDN documentation.
//
// > The __vectorcall calling convention is only supported in native code on x86 and
// x64 processors that include Streaming SIMD Extensions 2 (SSE2) and above.
// > ...
// > On ARM machines, __vectorcall is accepted and ignored by the compiler.
//
// -- https://docs.microsoft.com/en-us/cpp/cpp/vectorcall?view=msvc-160
//
// > On ARM and x64 processors, __stdcall is accepted and ignored by the compiler;
//
// -- https://docs.microsoft.com/en-us/cpp/cpp/stdcall?view=msvc-160
//
// > In most cases, keywords or compiler switches that specify an unsupported
// > convention on a particular platform are ignored, and the platform default
// > convention is used.
//
// -- https://docs.microsoft.com/en-us/cpp/cpp/argument-passing-and-naming-conventions
Stdcall { .. } | Fastcall | Thiscall { .. } | Vectorcall if self.is_like_windows => {
true
}
// Outside of Windows we want to only support these calling conventions for the
// architectures for which these calling conventions are actually well defined.
Stdcall { .. } | Fastcall | Thiscall { .. } if self.arch == "x86" => true,
Vectorcall if ["x86", "x86_64"].contains(&&self.arch[..]) => true,
// Return a `None` for other cases so that we know to emit a future compat lint.
Stdcall { .. } | Fastcall | Thiscall { .. } | Vectorcall => return None,
})
}
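    // Per the match above: `Stdcall { .. }` yields `Some(true)` on x86 (or
    // any Windows-like target) and `None` on, say, non-Windows x86_64 --
    // the `None` is what triggers the UNSUPPORTED_CALLING_CONVENTIONS lint.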
/// Minimum integer size in bits that this target can perform atomic
/// operations on.
pub fn min_atomic_width(&self) -> u64 {
self.min_atomic_width.unwrap_or(8)
}
/// Maximum integer size in bits that this target can perform atomic
/// operations on.
pub fn max_atomic_width(&self) -> u64 {
self.max_atomic_width.unwrap_or_else(|| self.pointer_width.into())
}
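    // E.g. a target with `pointer_width: 64` and no explicit
    // `max-atomic-width` in its spec reports `max_atomic_width() == 64`.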
/// Loads a target descriptor from a JSON object.
pub fn from_json(mut obj: Json) -> Result<(Target, TargetWarnings), String> {
// While ugly, this code must remain this way to retain
// compatibility with existing JSON fields and the internal
// expected naming of the Target and TargetOptions structs.
// To ensure compatibility is retained, the built-in targets
// are round-tripped through this code to catch cases where
// the JSON parser is not updated to match the structs.
let mut get_req_field = |name: &str| {
obj.remove_key(name)
.and_then(|j| Json::as_string(&j).map(str::to_string))
.ok_or_else(|| format!("Field {} in target specification is required", name))
};
let mut base = Target {
llvm_target: get_req_field("llvm-target")?,
pointer_width: get_req_field("target-pointer-width")?
.parse::<u32>()
.map_err(|_| "target-pointer-width must be an integer".to_string())?,
data_layout: get_req_field("data-layout")?,
arch: get_req_field("arch")?,
options: Default::default(),
};
let mut incorrect_type = vec![];
macro_rules! key {
($key_name:ident) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(s) = obj.remove_key(&name).and_then(|j| Json::as_string(&j).map(str::to_string)) {
base.$key_name = s;
}
} );
($key_name:ident = $json_name:expr) => ( {
let name = $json_name;
if let Some(s) = obj.remove_key(&name).and_then(|j| Json::as_string(&j).map(str::to_string)) {
base.$key_name = s;
}
} );
($key_name:ident, bool) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(s) = obj.remove_key(&name).and_then(|j| Json::as_boolean(&j)) {
base.$key_name = s;
}
} );
($key_name:ident, u64) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(s) = obj.remove_key(&name).and_then(|j| Json::as_u64(&j)) {
base.$key_name = s;
}
} );
($key_name:ident, Option<u32>) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(s) = obj.remove_key(&name).and_then(|j| Json::as_u64(&j)) {
if s < 1 || s > 5 {
return Err("Not a valid DWARF version number".to_string());
}
base.$key_name = Some(s as u32);
}
} );
($key_name:ident, Option<u64>) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(s) = obj.remove_key(&name).and_then(|j| Json::as_u64(&j)) {
base.$key_name = Some(s);
}
} );
($key_name:ident, MergeFunctions) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| {
match s.parse::<MergeFunctions>() {
Ok(mergefunc) => base.$key_name = mergefunc,
_ => return Some(Err(format!("'{}' is not a valid value for \
merge-functions. Use 'disabled', \
'trampolines', or 'aliases'.",
s))),
}
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
($key_name:ident, RelocModel) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| {
match s.parse::<RelocModel>() {
Ok(relocation_model) => base.$key_name = relocation_model,
_ => return Some(Err(format!("'{}' is not a valid relocation model. \
Run `rustc --print relocation-models` to \
see the list of supported values.", s))),
}
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
($key_name:ident, CodeModel) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| {
match s.parse::<CodeModel>() {
Ok(code_model) => base.$key_name = Some(code_model),
_ => return Some(Err(format!("'{}' is not a valid code model. \
Run `rustc --print code-models` to \
see the list of supported values.", s))),
}
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
($key_name:ident, TlsModel) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| {
match s.parse::<TlsModel>() {
Ok(tls_model) => base.$key_name = tls_model,
_ => return Some(Err(format!("'{}' is not a valid TLS model. \
Run `rustc --print tls-models` to \
see the list of supported values.", s))),
}
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
($key_name:ident, PanicStrategy) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| {
match s {
"unwind" => base.$key_name = PanicStrategy::Unwind,
"abort" => base.$key_name = PanicStrategy::Abort,
_ => return Some(Err(format!("'{}' is not a valid value for \
panic-strategy. Use 'unwind' or 'abort'.",
s))),
}
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
($key_name:ident, RelroLevel) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| {
match s.parse::<RelroLevel>() {
Ok(level) => base.$key_name = level,
_ => return Some(Err(format!("'{}' is not a valid value for \
                                                          relro-level. Use 'full', 'partial', or 'off'.",
s))),
}
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
($key_name:ident, SplitDebuginfo) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| {
match s.parse::<SplitDebuginfo>() {
Ok(level) => base.$key_name = level,
_ => return Some(Err(format!("'{}' is not a valid value for \
split-debuginfo. Use 'off' or 'dsymutil'.",
s))),
}
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
($key_name:ident, list) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(j) = obj.remove_key(&name){
if let Some(v) = Json::as_array(&j) {
base.$key_name = v.iter()
.map(|a| a.as_string().unwrap().to_string())
.collect();
} else {
incorrect_type.push(name)
}
}
} );
($key_name:ident, opt_list) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(j) = obj.remove_key(&name) {
if let Some(v) = Json::as_array(&j) {
base.$key_name = Some(v.iter()
.map(|a| a.as_string().unwrap().to_string())
.collect());
} else {
incorrect_type.push(name)
}
}
} );
($key_name:ident, optional) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(o) = obj.remove_key(&name[..]) {
base.$key_name = o
.as_string()
.map(|s| s.to_string() );
}
} );
($key_name:ident, LldFlavor) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| {
if let Some(flavor) = LldFlavor::from_str(&s) {
base.$key_name = flavor;
} else {
return Some(Err(format!(
"'{}' is not a valid value for lld-flavor. \
                             Use 'darwin', 'gnu', 'link' or 'wasm'.",
s)))
}
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
($key_name:ident, LinkerFlavor) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| {
match LinkerFlavor::from_str(s) {
Some(linker_flavor) => base.$key_name = linker_flavor,
_ => return Some(Err(format!("'{}' is not a valid value for linker-flavor. \
Use {}", s, LinkerFlavor::one_of()))),
}
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
($key_name:ident, StackProbeType) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.remove_key(&name[..]).and_then(|o| match StackProbeType::from_json(&o) {
Ok(v) => {
base.$key_name = v;
Some(Ok(()))
},
Err(s) => Some(Err(
format!("`{:?}` is not a valid value for `{}`: {}", o, name, s)
)),
}).unwrap_or(Ok(()))
} );
($key_name:ident, SanitizerSet) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(o) = obj.remove_key(&name[..]) {
if let Some(a) = o.as_array() {
for s in a {
base.$key_name |= match s.as_string() {
Some("address") => SanitizerSet::ADDRESS,
Some("leak") => SanitizerSet::LEAK,
Some("memory") => SanitizerSet::MEMORY,
Some("thread") => SanitizerSet::THREAD,
Some("hwaddress") => SanitizerSet::HWADDRESS,
Some(s) => return Err(format!("unknown sanitizer {}", s)),
_ => return Err(format!("not a string: {:?}", s)),
};
}
} else {
incorrect_type.push(name)
}
}
Ok::<(), String>(())
} );
($key_name:ident, crt_objects_fallback) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| {
match s.parse::<CrtObjectsFallback>() {
Ok(fallback) => base.$key_name = Some(fallback),
_ => return Some(Err(format!("'{}' is not a valid CRT objects fallback. \
Use 'musl', 'mingw' or 'wasm'", s))),
}
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
($key_name:ident, link_objects) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(val) = obj.remove_key(&name[..]) {
let obj = val.as_object().ok_or_else(|| format!("{}: expected a \
JSON object with fields per CRT object kind.", name))?;
let mut args = CrtObjects::new();
for (k, v) in obj {
let kind = LinkOutputKind::from_str(&k).ok_or_else(|| {
format!("{}: '{}' is not a valid value for CRT object kind. \
Use '(dynamic,static)-(nopic,pic)-exe' or \
'(dynamic,static)-dylib' or 'wasi-reactor-exe'", name, k)
})?;
let v = v.as_array().ok_or_else(||
format!("{}.{}: expected a JSON array", name, k)
)?.iter().enumerate()
.map(|(i,s)| {
let s = s.as_string().ok_or_else(||
format!("{}.{}[{}]: expected a JSON string", name, k, i))?;
Ok(s.to_owned())
})
.collect::<Result<Vec<_>, String>>()?;
args.insert(kind, v);
}
base.$key_name = args;
}
} );
($key_name:ident, link_args) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(val) = obj.remove_key(&name[..]) {
let obj = val.as_object().ok_or_else(|| format!("{}: expected a \
JSON object with fields per linker-flavor.", name))?;
let mut args = LinkArgs::new();
for (k, v) in obj {
let flavor = LinkerFlavor::from_str(&k).ok_or_else(|| {
format!("{}: '{}' is not a valid value for linker-flavor. \<|fim▁hole|>
let v = v.as_array().ok_or_else(||
format!("{}.{}: expected a JSON array", name, k)
)?.iter().enumerate()
.map(|(i,s)| {
let s = s.as_string().ok_or_else(||
format!("{}.{}[{}]: expected a JSON string", name, k, i))?;
Ok(s.to_owned())
})
.collect::<Result<Vec<_>, String>>()?;
args.insert(flavor, v);
}
base.$key_name = args;
}
} );
($key_name:ident, env) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(o) = obj.remove_key(&name[..]) {
if let Some(a) = o.as_array() {
for o in a {
if let Some(s) = o.as_string() {
let p = s.split('=').collect::<Vec<_>>();
if p.len() == 2 {
let k = p[0].to_string();
let v = p[1].to_string();
base.$key_name.push((k, v));
}
}
}
} else {
incorrect_type.push(name)
}
}
} );
($key_name:ident, Option<Abi>) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.remove_key(&name[..]).and_then(|o| o.as_string().and_then(|s| {
match lookup_abi(s) {
Some(abi) => base.$key_name = Some(abi),
_ => return Some(Err(format!("'{}' is not a valid value for abi", s))),
}
Some(Ok(()))
})).unwrap_or(Ok(()))
} );
($key_name:ident, TargetFamilies) => ( {
if let Some(value) = obj.remove_key("target-family") {
if let Some(v) = Json::as_array(&value) {
base.$key_name = v.iter()
.map(|a| a.as_string().unwrap().to_string())
.collect();
} else if let Some(v) = Json::as_string(&value) {
base.$key_name = vec![v.to_string()];
}
}
} );
}
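        // Example of the macro above: `key!(is_builtin, bool)` removes the
        // JSON key "is-builtin" (underscores become hyphens) and, when
        // present and boolean, stores the value in `base.is_builtin`.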
if let Some(j) = obj.remove_key("target-endian") {
if let Some(s) = Json::as_string(&j) {
base.endian = s.parse()?;
} else {
incorrect_type.push("target-endian".to_string())
}
}
if let Some(fp) = obj.remove_key("frame-pointer") {
if let Some(s) = Json::as_string(&fp) {
base.frame_pointer = s
.parse()
.map_err(|()| format!("'{}' is not a valid value for frame-pointer", s))?;
} else {
incorrect_type.push("frame-pointer".to_string())
}
}
key!(is_builtin, bool);
key!(c_int_width = "target-c-int-width");
key!(os);
key!(env);
key!(abi);
key!(vendor);
key!(linker_flavor, LinkerFlavor)?;
key!(linker, optional);
key!(lld_flavor, LldFlavor)?;
key!(pre_link_objects, link_objects);
key!(post_link_objects, link_objects);
key!(pre_link_objects_fallback, link_objects);
key!(post_link_objects_fallback, link_objects);
key!(crt_objects_fallback, crt_objects_fallback)?;
key!(pre_link_args, link_args);
key!(late_link_args, link_args);
key!(late_link_args_dynamic, link_args);
key!(late_link_args_static, link_args);
key!(post_link_args, link_args);
key!(link_script, optional);
key!(link_env, env);
key!(link_env_remove, list);
key!(asm_args, list);
key!(cpu);
key!(features);
key!(dynamic_linking, bool);
key!(only_cdylib, bool);
key!(executables, bool);
key!(relocation_model, RelocModel)?;
key!(code_model, CodeModel)?;
key!(tls_model, TlsModel)?;
key!(disable_redzone, bool);
key!(function_sections, bool);
key!(dll_prefix);
key!(dll_suffix);
key!(exe_suffix);
key!(staticlib_prefix);
key!(staticlib_suffix);
key!(families, TargetFamilies);
key!(abi_return_struct_as_int, bool);
key!(is_like_osx, bool);
key!(is_like_solaris, bool);
key!(is_like_windows, bool);
key!(is_like_msvc, bool);
key!(is_like_emscripten, bool);
key!(is_like_fuchsia, bool);
key!(is_like_wasm, bool);
key!(dwarf_version, Option<u32>);
key!(linker_is_gnu, bool);
key!(allows_weak_linkage, bool);
key!(has_rpath, bool);
key!(no_default_libraries, bool);
key!(position_independent_executables, bool);
key!(static_position_independent_executables, bool);
key!(needs_plt, bool);
key!(relro_level, RelroLevel)?;
key!(archive_format);
key!(allow_asm, bool);
key!(main_needs_argc_argv, bool);
key!(has_elf_tls, bool);
key!(obj_is_bitcode, bool);
key!(forces_embed_bitcode, bool);
key!(bitcode_llvm_cmdline);
key!(max_atomic_width, Option<u64>);
key!(min_atomic_width, Option<u64>);
key!(atomic_cas, bool);
key!(panic_strategy, PanicStrategy)?;
key!(crt_static_allows_dylibs, bool);
key!(crt_static_default, bool);
key!(crt_static_respected, bool);
key!(stack_probes, StackProbeType)?;
key!(min_global_align, Option<u64>);
key!(default_codegen_units, Option<u64>);
key!(trap_unreachable, bool);
key!(requires_lto, bool);
key!(singlethread, bool);
key!(no_builtins, bool);
key!(default_hidden_visibility, bool);
key!(emit_debug_gdb_scripts, bool);
key!(requires_uwtable, bool);
key!(default_uwtable, bool);
key!(simd_types_indirect, bool);
key!(limit_rdylib_exports, bool);
key!(override_export_symbols, opt_list);
key!(merge_functions, MergeFunctions)?;
key!(mcount = "target-mcount");
key!(llvm_abiname);
key!(relax_elf_relocations, bool);
key!(llvm_args, list);
key!(use_ctors_section, bool);
key!(eh_frame_header, bool);
key!(has_thumb_interworking, bool);
key!(split_debuginfo, SplitDebuginfo)?;
key!(supported_sanitizers, SanitizerSet)?;
key!(default_adjusted_cabi, Option<Abi>)?;
key!(c_enum_min_bits, u64);
if base.is_builtin {
// This can cause unfortunate ICEs later down the line.
return Err("may not set is_builtin for targets not built-in".to_string());
}
// Each field should have been read using `Json::remove_key` so any keys remaining are unused.
let remaining_keys = obj.as_object().ok_or("Expected JSON object for target")?.keys();
Ok((
base,
TargetWarnings { unused_fields: remaining_keys.cloned().collect(), incorrect_type },
))
}
/// Search for a JSON file specifying the given target triple.
///
/// If none is found in `$RUST_TARGET_PATH`, look for a file called `target.json` inside the
/// sysroot under the target-triple's `rustlib` directory. Note that it could also just be a
    /// bare filename already, so also check for that. If it is one of the hardcoded targets we know
/// about, just return it directly.
///
/// The error string could come from any of the APIs called, including filesystem access and
/// JSON decoding.
pub fn search(
target_triple: &TargetTriple,
sysroot: &PathBuf,
) -> Result<(Target, TargetWarnings), String> {
use rustc_serialize::json;
use std::env;
use std::fs;
fn load_file(path: &Path) -> Result<(Target, TargetWarnings), String> {
let contents = fs::read(path).map_err(|e| e.to_string())?;
let obj = json::from_reader(&mut &contents[..]).map_err(|e| e.to_string())?;
Target::from_json(obj)
}
match *target_triple {
TargetTriple::TargetTriple(ref target_triple) => {
// check if triple is in list of built-in targets
if let Some(t) = load_builtin(target_triple) {
return Ok((t, TargetWarnings::empty()));
}
// search for a file named `target_triple`.json in RUST_TARGET_PATH
let path = {
let mut target = target_triple.to_string();
target.push_str(".json");
PathBuf::from(target)
};
let target_path = env::var_os("RUST_TARGET_PATH").unwrap_or_default();
for dir in env::split_paths(&target_path) {
let p = dir.join(&path);
if p.is_file() {
return load_file(&p);
}
}
// Additionally look in the sysroot under `lib/rustlib/<triple>/target.json`
// as a fallback.
let rustlib_path = crate::target_rustlib_path(&sysroot, &target_triple);
let p = std::array::IntoIter::new([
Path::new(sysroot),
Path::new(&rustlib_path),
Path::new("target.json"),
])
.collect::<PathBuf>();
if p.is_file() {
return load_file(&p);
}
Err(format!("Could not find specification for target {:?}", target_triple))
}
TargetTriple::TargetPath(ref target_path) => {
if target_path.is_file() {
return load_file(&target_path);
}
Err(format!("Target path {:?} is not a valid file", target_path))
}
}
}
}
impl ToJson for Target {
fn to_json(&self) -> Json {
let mut d = BTreeMap::new();
let default: TargetOptions = Default::default();
macro_rules! target_val {
($attr:ident) => {{
let name = (stringify!($attr)).replace("_", "-");
d.insert(name, self.$attr.to_json());
}};
($attr:ident, $key_name:expr) => {{
let name = $key_name;
d.insert(name.to_string(), self.$attr.to_json());
}};
}
macro_rules! target_option_val {
($attr:ident) => {{
let name = (stringify!($attr)).replace("_", "-");
if default.$attr != self.$attr {
d.insert(name, self.$attr.to_json());
}
}};
($attr:ident, $key_name:expr) => {{
let name = $key_name;
if default.$attr != self.$attr {
d.insert(name.to_string(), self.$attr.to_json());
}
}};
(link_args - $attr:ident) => {{
let name = (stringify!($attr)).replace("_", "-");
if default.$attr != self.$attr {
let obj = self
.$attr
.iter()
.map(|(k, v)| (k.desc().to_owned(), v.clone()))
.collect::<BTreeMap<_, _>>();
d.insert(name, obj.to_json());
}
}};
(env - $attr:ident) => {{
let name = (stringify!($attr)).replace("_", "-");
if default.$attr != self.$attr {
let obj = self
.$attr
.iter()
.map(|&(ref k, ref v)| k.clone() + "=" + &v)
.collect::<Vec<_>>();
d.insert(name, obj.to_json());
}
}};
}
target_val!(llvm_target);
d.insert("target-pointer-width".to_string(), self.pointer_width.to_string().to_json());
target_val!(arch);
target_val!(data_layout);
target_option_val!(is_builtin);
target_option_val!(endian, "target-endian");
target_option_val!(c_int_width, "target-c-int-width");
target_option_val!(os);
target_option_val!(env);
target_option_val!(abi);
target_option_val!(vendor);
target_option_val!(linker_flavor);
target_option_val!(linker);
target_option_val!(lld_flavor);
target_option_val!(pre_link_objects);
target_option_val!(post_link_objects);
target_option_val!(pre_link_objects_fallback);
target_option_val!(post_link_objects_fallback);
target_option_val!(crt_objects_fallback);
target_option_val!(link_args - pre_link_args);
target_option_val!(link_args - late_link_args);
target_option_val!(link_args - late_link_args_dynamic);
target_option_val!(link_args - late_link_args_static);
target_option_val!(link_args - post_link_args);
target_option_val!(link_script);
target_option_val!(env - link_env);
target_option_val!(link_env_remove);
target_option_val!(asm_args);
target_option_val!(cpu);
target_option_val!(features);
target_option_val!(dynamic_linking);
target_option_val!(only_cdylib);
target_option_val!(executables);
target_option_val!(relocation_model);
target_option_val!(code_model);
target_option_val!(tls_model);
target_option_val!(disable_redzone);
target_option_val!(frame_pointer);
target_option_val!(function_sections);
target_option_val!(dll_prefix);
target_option_val!(dll_suffix);
target_option_val!(exe_suffix);
target_option_val!(staticlib_prefix);
target_option_val!(staticlib_suffix);
target_option_val!(families, "target-family");
target_option_val!(abi_return_struct_as_int);
target_option_val!(is_like_osx);
target_option_val!(is_like_solaris);
target_option_val!(is_like_windows);
target_option_val!(is_like_msvc);
target_option_val!(is_like_emscripten);
target_option_val!(is_like_fuchsia);
target_option_val!(is_like_wasm);
target_option_val!(dwarf_version);
target_option_val!(linker_is_gnu);
target_option_val!(allows_weak_linkage);
target_option_val!(has_rpath);
target_option_val!(no_default_libraries);
target_option_val!(position_independent_executables);
target_option_val!(static_position_independent_executables);
target_option_val!(needs_plt);
target_option_val!(relro_level);
target_option_val!(archive_format);
target_option_val!(allow_asm);
target_option_val!(main_needs_argc_argv);
target_option_val!(has_elf_tls);
target_option_val!(obj_is_bitcode);
target_option_val!(forces_embed_bitcode);
target_option_val!(bitcode_llvm_cmdline);
target_option_val!(min_atomic_width);
target_option_val!(max_atomic_width);
target_option_val!(atomic_cas);
target_option_val!(panic_strategy);
target_option_val!(crt_static_allows_dylibs);
target_option_val!(crt_static_default);
target_option_val!(crt_static_respected);
target_option_val!(stack_probes);
target_option_val!(min_global_align);
target_option_val!(default_codegen_units);
target_option_val!(trap_unreachable);
target_option_val!(requires_lto);
target_option_val!(singlethread);
target_option_val!(no_builtins);
target_option_val!(default_hidden_visibility);
target_option_val!(emit_debug_gdb_scripts);
target_option_val!(requires_uwtable);
target_option_val!(default_uwtable);
target_option_val!(simd_types_indirect);
target_option_val!(limit_rdylib_exports);
target_option_val!(override_export_symbols);
target_option_val!(merge_functions);
target_option_val!(mcount, "target-mcount");
target_option_val!(llvm_abiname);
target_option_val!(relax_elf_relocations);
target_option_val!(llvm_args);
target_option_val!(use_ctors_section);
target_option_val!(eh_frame_header);
target_option_val!(has_thumb_interworking);
target_option_val!(split_debuginfo);
target_option_val!(supported_sanitizers);
target_option_val!(c_enum_min_bits);
if let Some(abi) = self.default_adjusted_cabi {
d.insert("default-adjusted-cabi".to_string(), Abi::name(abi).to_json());
}
Json::Object(d)
}
}
/// Either a target triple string or a path to a JSON file.
#[derive(PartialEq, Clone, Debug, Hash, Encodable, Decodable)]
pub enum TargetTriple {
TargetTriple(String),
TargetPath(PathBuf),
}
impl TargetTriple {
/// Creates a target triple from the passed target triple string.
pub fn from_triple(triple: &str) -> Self {
TargetTriple::TargetTriple(triple.to_string())
}
/// Creates a target triple from the passed target path.
pub fn from_path(path: &Path) -> Result<Self, io::Error> {
let canonicalized_path = path.canonicalize()?;
Ok(TargetTriple::TargetPath(canonicalized_path))
}
/// Returns a string triple for this target.
///
/// If this target is a path, the file name (without extension) is returned.
pub fn triple(&self) -> &str {
match *self {
TargetTriple::TargetTriple(ref triple) => triple,
TargetTriple::TargetPath(ref path) => path
.file_stem()
.expect("target path must not be empty")
.to_str()
.expect("target path must be valid unicode"),
}
}
/// Returns an extended string triple for this target.
///
/// If this target is a path, a hash of the path is appended to the triple returned
/// by `triple()`.
pub fn debug_triple(&self) -> String {
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
let triple = self.triple();
if let TargetTriple::TargetPath(ref path) = *self {
let mut hasher = DefaultHasher::new();
path.hash(&mut hasher);
let hash = hasher.finish();
format!("{}-{}", triple, hash)
} else {
triple.to_owned()
}
}
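    // E.g. `TargetTriple::from_triple("x86_64-unknown-linux-gnu")` keeps the
    // plain triple, while a `TargetPath` is rendered as "<file-stem>-<hash>".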
}
impl fmt::Display for TargetTriple {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.debug_triple())
}
}<|fim▁end|>
|
Use 'em', 'gcc', 'ld' or 'msvc'", name, k)
})?;
|
<|file_name|>index.js<|end_file_name|><|fim▁begin|>var Transform = require('stream').Transform,
util = require('util');
var StreamConcat = function(streams, options) {
Transform.call(this, options);
var self = this;
this.streams = streams;
this.canAddStream = true;
this.currentStream = null;
this.streamIndex = 0;
var nextStream = function() {
self.currentStream = null;
if (self.streams.constructor === Array && self.streamIndex < self.streams.length) {
self.currentStream = self.streams[self.streamIndex++];
} else if (typeof self.streams === 'function') {
self.canAddStream = false;
self.currentStream = self.streams();
}
if (self.currentStream === null) {
self.canAddStream = false;
self.push(null);
} else {
self.currentStream.pipe(self, {end: false});
self.currentStream.on('end', nextStream);
}
};
nextStream();
};
util.inherits(StreamConcat, Transform);
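// Usage sketch (illustrative; the file names are hypothetical):
// var fs = require('fs');
// var combined = new StreamConcat([
//   fs.createReadStream('a.txt'),
//   fs.createReadStream('b.txt')
// ]);
// combined.pipe(process.stdout);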
StreamConcat.prototype._transform = function(chunk, encoding, callback) {<|fim▁hole|>};
StreamConcat.prototype.addStream = function(newStream) {
if (this.canAddStream)
this.streams.push(newStream);
else
this.emit('error', new Error('Can\'t add stream.'));
};
module.exports = StreamConcat;<|fim▁end|>
|
callback(null, chunk);
|
<|file_name|>0129_auto_20170815_0946.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-15 09:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('scoping', '0128_auto_20170808_0954'),
]
operations = [
migrations.CreateModel(
name='ProjectRoles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),<|fim▁hole|> migrations.RemoveField(
model_name='project',
name='owner',
),
migrations.AddField(
model_name='projectroles',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scoping.Project'),
),
migrations.AddField(
model_name='project',
name='users',
field=models.ManyToManyField(through='scoping.ProjectRoles', to=settings.AUTH_USER_MODEL),
),
]<|fim▁end|>
|
('role', models.CharField(choices=[('OW', 'Owner'), ('AD', 'Admin'), ('RE', 'Reviewer'), ('VE', 'Viewer')], max_length=2)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
|
<|file_name|>about_dictionaries.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutHashes in the Ruby Koans
#
from runner.koan import *
class AboutDictionaries(Koan):
def test_creating_dictionaries(self):
empty_dict = dict()
self.assertEqual(dict, type(empty_dict))
self.assertEqual(dict(), empty_dict)
self.assertEqual(0, len(empty_dict))
def test_dictionary_literals(self):
empty_dict = {}
self.assertEqual(dict, type(empty_dict))
babel_fish = {'one': 'uno', 'two': 'dos'}
self.assertEqual(__, len(babel_fish))
def test_accessing_dictionaries(self):<|fim▁hole|> babel_fish = {'one': 'uno', 'two': 'dos'}
self.assertEqual(__, babel_fish['one'])
self.assertEqual(__, babel_fish['two'])
def test_changing_dictionaries(self):
babel_fish = {'one': 'uno', 'two': 'dos'}
babel_fish['one'] = 'eins'
expected = {'two': 'dos', 'one': __}
self.assertEqual(expected, babel_fish)
def test_dictionary_is_unordered(self):
dict1 = {'one': 'uno', 'two': 'dos'}
dict2 = {'two': 'dos', 'one': 'uno'}
self.assertEqual(____, dict1 == dict2)
def test_dictionary_keys_and_values(self):
babel_fish = {'one': 'uno', 'two': 'dos'}
self.assertEqual(__, len(babel_fish.keys()))
self.assertEqual(__, len(babel_fish.values()))
self.assertEqual(__, 'one' in babel_fish.keys())
self.assertEqual(__, 'two' in babel_fish.values())
self.assertEqual(__, 'uno' in babel_fish.keys())
self.assertEqual(__, 'dos' in babel_fish.values())
def test_making_a_dictionary_from_a_sequence_of_keys(self):
cards = {}.fromkeys(
('red warrior', 'green elf', 'blue valkyrie', 'yellow dwarf',
'confused looking zebra'),
42)
self.assertEqual(__, len(cards))
self.assertEqual(__, cards['green elf'])
self.assertEqual(__, cards['yellow dwarf'])<|fim▁end|>
| |
<|file_name|>HeartBeatQueryListenWorker.java<|end_file_name|><|fim▁begin|>/*-
* <<
* UAVStack
* ==
* Copyright (C) 2016 - 2017 UAVStack
* ==
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* >>
*/
package com.creditease.agent.feature.hbagent;
import com.creditease.agent.http.api.UAVHttpMessage;
import com.creditease.agent.spi.AbstractHttpServiceComponent;<|fim▁hole|> public HeartBeatQueryListenWorker(String cName, String feature, String initHandlerKey) {
super(cName, feature, initHandlerKey);
}
@Override
protected UAVHttpMessage adaptRequest(HttpMessage message) {
String messageBody = message.getRequestBodyAsString("UTF-8");
if (log.isDebugEnable()) {
log.debug(this, "HeartBeatQueryListenWorker Request: " + messageBody);
}
UAVHttpMessage msg = new UAVHttpMessage(messageBody);
return msg;
}
@Override
protected void adaptResponse(HttpMessage message, UAVHttpMessage t) {
String response = t.getResponseAsJsonString();
message.putResponseBodyInString(response, 200, "utf-8");
if (log.isDebugEnable()) {
log.debug(this, "HeartBeatQueryListenWorker Response: " + response);
}
}
}<|fim▁end|>
|
import com.creditease.agent.spi.HttpMessage;
public class HeartBeatQueryListenWorker extends AbstractHttpServiceComponent<UAVHttpMessage> {
|
<|file_name|>pattern_substs_on_brace_struct.rs<|end_file_name|><|fim▁begin|>#![feature(nll)]
struct Foo<'a> { field: &'a u32 }
fn in_let() {
let y = 22;
let foo = Foo { field: &y };
//~^ ERROR `y` does not live long enough
let Foo::<'static> { field: _z } = foo;
}
fn in_main() {
let y = 22;<|fim▁hole|> Foo::<'static> { field: _z } => {
}
}
}
fn main() { }<|fim▁end|>
|
let foo = Foo { field: &y };
//~^ ERROR `y` does not live long enough
match foo {
|
<|file_name|>advanced_lane_finding-direction_of_the_gradient_exercise.py<|end_file_name|><|fim▁begin|>import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
# Read in an image
image = mpimg.imread('signs_vehicles_xygrad.png')
# Define a function that applies Sobel x and y,
# then computes the direction of the gradient
# and applies a threshold.
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi / 2)):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
<|fim▁hole|> abs_s_x = np.absolute(sobelx)
abs_s_y = np.absolute(sobely)
# 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
# Important, y should come before x here if we want to detect lines
dir_grad = np.arctan2(abs_s_y, abs_s_x)
# 5) Create a binary mask where direction thresholds are met
binary_output = np.zeros_like(dir_grad)
# 6) Return this mask as your binary_output image
binary_output[(dir_grad >= thresh[0]) & (dir_grad <= thresh[1])] = 1
return binary_output
# Run the function
dir_binary = dir_threshold(image, sobel_kernel=15, thresh=(0.7, 1.3))
# Plot the result
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(image)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(dir_binary, cmap='gray')
ax2.set_title('Thresholded Grad. Dir.', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)<|fim▁end|>
|
# 3) Take the absolute value of the x and y gradients
|
<|file_name|>nfs_cmode.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Bob Callaway. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp NFS storage.
"""
import os
import uuid
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as na_api
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
"""NetApp NFS driver for Data ONTAP (Cluster-mode)."""
REQUIRED_CMODE_FLAGS = ['netapp_vserver']
def __init__(self, *args, **kwargs):
super(NetAppCmodeNfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(na_opts.netapp_cluster_opts)
def do_setup(self, context):
"""Do the customized set up on client for cluster mode."""
super(NetAppCmodeNfsDriver, self).do_setup(context)
na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration)
self.vserver = self.configuration.netapp_vserver
self.zapi_client = client_cmode.Client(
transport_type=self.configuration.netapp_transport_type,
username=self.configuration.netapp_login,
password=self.configuration.netapp_password,
hostname=self.configuration.netapp_server_hostname,
port=self.configuration.netapp_server_port,
vserver=self.vserver)
self.ssc_enabled = True
self.ssc_vols = None
self.stale_vols = set()
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
super(NetAppCmodeNfsDriver, self).check_for_setup_error()
ssc_cmode.check_ssc_api_permissions(self.zapi_client)
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume reference
"""
LOG.debug('create_volume on %s' % volume['host'])
self._ensure_shares_mounted()
# get share as pool name
share = volume_utils.extract_host(volume['host'], level='pool')
if share is None:
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
extra_specs = na_utils.get_volume_extra_specs(volume)
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
if extra_specs else None
# warn on obsolete extra specs
na_utils.log_extra_spec_warnings(extra_specs)
try:
volume['provider_location'] = share
LOG.info(_LI('casted to %s') % volume['provider_location'])
self._do_create_volume(volume)
if qos_policy_group:
self._set_qos_policy_group_on_volume(volume, share,
qos_policy_group)
return {'provider_location': volume['provider_location']}
except Exception as ex:
LOG.error(_LW("Exception creating vol %(name)s on "
"share %(share)s. Details: %(ex)s")
% {'name': volume['name'],
'share': volume['provider_location'],
'ex': ex})
volume['provider_location'] = None
finally:
if self.ssc_enabled:
self._update_stale_vols(self._get_vol_for_share(share))
msg = _("Volume %s could not be created on shares.")
raise exception.VolumeBackendAPIException(data=msg % (volume['name']))
def _set_qos_policy_group_on_volume(self, volume, share, qos_policy_group):
target_path = '%s' % (volume['name'])
export_path = share.split(':')[1]
flex_vol_name = self.zapi_client.get_vol_by_junc_vserver(self.vserver,
export_path)
self.zapi_client.file_assign_qos(flex_vol_name,
qos_policy_group,
target_path)
def _check_volume_type(self, volume, share, file_name):
"""Match volume type for share file."""
extra_specs = na_utils.get_volume_extra_specs(volume)
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
if extra_specs else None
if not self._is_share_vol_type_match(volume, share):
raise exception.ManageExistingVolumeTypeMismatch(
reason=(_("Volume type does not match for share %s."),
share))
if qos_policy_group:
try:
vserver, flex_vol_name = self._get_vserver_and_exp_vol(
share=share)
self.zapi_client.file_assign_qos(flex_vol_name,
qos_policy_group,
file_name)
except na_api.NaApiError as ex:
LOG.exception(_LE('Setting file QoS policy group failed. %s'),
ex)
raise exception.NetAppDriverException(
reason=(_('Setting file QoS policy group failed. %s'), ex))
def _clone_volume(self, volume_name, clone_name,
volume_id, share=None):
"""Clones mounted volume on NetApp Cluster."""
(vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share)
self.zapi_client.clone_file(exp_volume, volume_name, clone_name,
vserver)
share = share if share else self._get_provider_location(volume_id)
self._post_prov_deprov_in_ssc(share)
def _get_vserver_and_exp_vol(self, volume_id=None, share=None):
"""Gets the vserver and export volume for share."""
(host_ip, export_path) = self._get_export_ip_path(volume_id, share)
ifs = self.zapi_client.get_if_info_by_ip(host_ip)
vserver = ifs[0].get_child_content('vserver')
exp_volume = self.zapi_client.get_vol_by_junc_vserver(vserver,
export_path)
return vserver, exp_volume
def _update_volume_stats(self):
"""Retrieve stats info from vserver."""
self._ensure_shares_mounted()
sync = True if self.ssc_vols is None else False
ssc_cmode.refresh_cluster_ssc(self, self.zapi_client.connection,
self.vserver, synchronous=sync)
LOG.debug('Updating volume stats')
data = {}
netapp_backend = 'NetApp_NFS_Cluster_direct'
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or netapp_backend
data['vendor_name'] = 'NetApp'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'nfs'
data['pools'] = self._get_pool_stats()
self._spawn_clean_cache_job()
self.zapi_client.provide_ems(self, netapp_backend, self._app_version)
self._stats = data
def _get_pool_stats(self):
"""Retrieve pool (i.e. NFS share) stats info from SSC volumes."""
pools = []
for nfs_share in self._mounted_shares:
capacity = self._get_extended_capacity_info(nfs_share)
pool = dict()
pool['pool_name'] = nfs_share
pool['QoS_support'] = False
pool['reserved_percentage'] = 0
# Report pool as reserved when over the configured used_ratio
if capacity['used_ratio'] > self.configuration.nfs_used_ratio:
pool['reserved_percentage'] = 100
# Report pool as reserved when over the subscribed ratio
if capacity['subscribed_ratio'] >=\
self.configuration.nfs_oversub_ratio:
pool['reserved_percentage'] = 100
# convert sizes to GB
total = float(capacity['apparent_size']) / units.Gi
pool['total_capacity_gb'] = na_utils.round_down(total, '0.01')
free = float(capacity['apparent_available']) / units.Gi
pool['free_capacity_gb'] = na_utils.round_down(free, '0.01')
# add SSC content if available
vol = self._get_vol_for_share(nfs_share)
if vol and self.ssc_vols:
pool['netapp_raid_type'] = vol.aggr['raid_type']
pool['netapp_disk_type'] = vol.aggr['disk_type']
mirrored = vol in self.ssc_vols['mirrored']<|fim▁hole|>
dedup = vol in self.ssc_vols['dedup']
pool['netapp_dedup'] = six.text_type(dedup).lower()
pool['netapp_nodedup'] = six.text_type(not dedup).lower()
compression = vol in self.ssc_vols['compression']
pool['netapp_compression'] = six.text_type(compression).lower()
pool['netapp_nocompression'] = six.text_type(
not compression).lower()
thin = vol in self.ssc_vols['thin']
pool['netapp_thin_provisioned'] = six.text_type(thin).lower()
pool['netapp_thick_provisioned'] = six.text_type(
not thin).lower()
pools.append(pool)
return pools
@utils.synchronized('update_stale')
def _update_stale_vols(self, volume=None, reset=False):
"""Populates stale vols with vol and returns set copy."""
if volume:
self.stale_vols.add(volume)
set_copy = self.stale_vols.copy()
if reset:
self.stale_vols.clear()
return set_copy
@utils.synchronized("refresh_ssc_vols")
def refresh_ssc_vols(self, vols):
"""Refreshes ssc_vols with latest entries."""
if not self._mounted_shares:
LOG.warning(_LW("No shares found hence skipping ssc refresh."))
return
mnt_share_vols = set()
vs_ifs = self.zapi_client.get_vserver_ips(self.vserver)
for vol in vols['all']:
for sh in self._mounted_shares:
host = sh.split(':')[0]
junction = sh.split(':')[1]
ip = na_utils.resolve_hostname(host)
if (self._ip_in_ifs(ip, vs_ifs) and
junction == vol.id['junction_path']):
mnt_share_vols.add(vol)
vol.export['path'] = sh
break
for key in vols.keys():
vols[key] = vols[key] & mnt_share_vols
self.ssc_vols = vols
def _ip_in_ifs(self, ip, api_ifs):
"""Checks if ip is listed for ifs in API format."""
if api_ifs is None:
return False
for ifc in api_ifs:
ifc_ip = ifc.get_child_content("address")
if ifc_ip == ip:
return True
return False
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
file_list = []
(vserver, exp_volume) = self._get_vserver_and_exp_vol(
volume_id=None, share=share)
for file in old_files:
path = '/vol/%s/%s' % (exp_volume, file)
u_bytes = self.zapi_client.get_file_usage(path, vserver)
file_list.append((file, u_bytes))
LOG.debug('Shortlisted files eligible for deletion: %s', file_list)
return file_list
def _share_match_for_ip(self, ip, shares):
"""Returns the share that is served by ip.
Multiple shares can have the same dir path but be served
from different IPs. This finds the share that is served by
`ip` on the same NFS server.
"""
ip_vserver = self._get_vserver_for_ip(ip)
if ip_vserver and shares:
for share in shares:
ip_sh = share.split(':')[0]
sh_vserver = self._get_vserver_for_ip(ip_sh)
if sh_vserver == ip_vserver:
LOG.debug('Share match found for ip %s', ip)
return share
LOG.debug('No share match found for ip %s', ip)
return None
def _get_vserver_for_ip(self, ip):
"""Get vserver for the mentioned ip."""
try:
ifs = self.zapi_client.get_if_info_by_ip(ip)
vserver = ifs[0].get_child_content('vserver')
return vserver
except Exception:
return None
def _get_vol_for_share(self, nfs_share):
"""Gets the ssc vol with given share."""
if self.ssc_vols:
for vol in self.ssc_vols['all']:
if vol.export['path'] == nfs_share:
return vol
return None
def _is_share_vol_compatible(self, volume, share):
"""Checks if share is compatible with volume to host it."""
compatible = self._is_share_eligible(share, volume['size'])
if compatible and self.ssc_enabled:
matched = self._is_share_vol_type_match(volume, share)
compatible = compatible and matched
return compatible
def _is_share_vol_type_match(self, volume, share):
"""Checks if share matches volume type."""
netapp_vol = self._get_vol_for_share(share)
LOG.debug("Found volume %(vol)s for share %(share)s."
% {'vol': netapp_vol, 'share': share})
extra_specs = na_utils.get_volume_extra_specs(volume)
vols = ssc_cmode.get_volumes_for_specs(self.ssc_vols, extra_specs)
return netapp_vol in vols
def delete_volume(self, volume):
"""Deletes a logical volume."""
share = volume['provider_location']
super(NetAppCmodeNfsDriver, self).delete_volume(volume)
self._post_prov_deprov_in_ssc(share)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
share = self._get_provider_location(snapshot.volume_id)
super(NetAppCmodeNfsDriver, self).delete_snapshot(snapshot)
self._post_prov_deprov_in_ssc(share)
def _post_prov_deprov_in_ssc(self, share):
if self.ssc_enabled and share:
netapp_vol = self._get_vol_for_share(share)
if netapp_vol:
self._update_stale_vols(volume=netapp_vol)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
copy_success = False
try:
major, minor = self.zapi_client.get_ontapi_version()
col_path = self.configuration.netapp_copyoffload_tool_path
if major == 1 and minor >= 20 and col_path:
self._try_copyoffload(context, volume, image_service, image_id)
copy_success = True
LOG.info(_LI('Copied image %(img)s to volume %(vol)s using '
'copy offload workflow.')
% {'img': image_id, 'vol': volume['id']})
else:
LOG.debug("Copy offload either not configured or"
" unsupported.")
except Exception as e:
LOG.exception(_LE('Copy offload workflow unsuccessful. %s'), e)
finally:
if not copy_success:
super(NetAppCmodeNfsDriver, self).copy_image_to_volume(
context, volume, image_service, image_id)
if self.ssc_enabled:
sh = self._get_provider_location(volume['id'])
self._update_stale_vols(self._get_vol_for_share(sh))
def _try_copyoffload(self, context, volume, image_service, image_id):
"""Tries server side file copy offload."""
copied = False
cache_result = self._find_image_in_cache(image_id)
if cache_result:
copied = self._copy_from_cache(volume, image_id, cache_result)
if not cache_result or not copied:
self._copy_from_img_service(context, volume, image_service,
image_id)
def _get_ip_verify_on_cluster(self, host):
"""Verifies if host on same cluster and returns ip."""
ip = na_utils.resolve_hostname(host)
vserver = self._get_vserver_for_ip(ip)
if not vserver:
raise exception.NotFound(_("Unable to locate an SVM that is "
"managing the IP address '%s'") % ip)
return ip
def _copy_from_cache(self, volume, image_id, cache_result):
"""Try copying image file_name from cached file_name."""
LOG.debug("Trying copy from cache using copy offload.")
copied = False
for res in cache_result:
try:
(share, file_name) = res
LOG.debug("Found cache file_name on share %s.", share)
if share != self._get_provider_location(volume['id']):
col_path = self.configuration.netapp_copyoffload_tool_path
src_ip = self._get_ip_verify_on_cluster(
share.split(':')[0])
src_path = os.path.join(share.split(':')[1], file_name)
dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
volume['id']))
dst_path = os.path.join(
self._get_export_path(volume['id']), volume['name'])
self._execute(col_path, src_ip, dst_ip,
src_path, dst_path,
run_as_root=self._execute_as_root,
check_exit_code=0)
self._register_image_in_cache(volume, image_id)
LOG.debug("Copied image from cache to volume %s using"
" copy offload.", volume['id'])
else:
self._clone_file_dst_exists(share, file_name,
volume['name'],
dest_exists=True)
LOG.debug("Copied image from cache to volume %s using"
" cloning.", volume['id'])
self._post_clone_image(volume)
copied = True
break
except Exception as e:
LOG.exception(_LE('Error in workflow copy from cache. %s.'), e)
return copied
def _clone_file_dst_exists(self, share, src_name, dst_name,
dest_exists=False):
"""Clone file even if dest exists."""
(vserver, exp_volume) = self._get_vserver_and_exp_vol(share=share)
self.zapi_client.clone_file(exp_volume, src_name, dst_name, vserver,
dest_exists=dest_exists)
def _copy_from_img_service(self, context, volume, image_service,
image_id):
"""Copies from the image service using copy offload."""
LOG.debug("Trying copy from image service using copy offload.")
image_loc = image_service.get_location(context, image_id)
image_loc = self._construct_image_nfs_url(image_loc)
conn, dr = self._check_get_nfs_path_segs(image_loc)
if conn:
src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0])
else:
raise exception.NotFound(_("Source host details not found."))
(__, ___, img_file) = image_loc.rpartition('/')
src_path = os.path.join(dr, img_file)
dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
volume['id']))
# tmp file is required to deal with img formats
tmp_img_file = six.text_type(uuid.uuid4())
col_path = self.configuration.netapp_copyoffload_tool_path
img_info = image_service.show(context, image_id)
dst_share = self._get_provider_location(volume['id'])
self._check_share_can_hold_size(dst_share, img_info['size'])
run_as_root = self._execute_as_root
dst_dir = self._get_mount_point_for_share(dst_share)
dst_img_local = os.path.join(dst_dir, tmp_img_file)
try:
# If the src and dst shares are not equal
if (('%s:%s' % (src_ip, dr)) !=
('%s:%s' % (dst_ip, self._get_export_path(volume['id'])))):
dst_img_serv_path = os.path.join(
self._get_export_path(volume['id']), tmp_img_file)
self._execute(col_path, src_ip, dst_ip, src_path,
dst_img_serv_path, run_as_root=run_as_root,
check_exit_code=0)
else:
self._clone_file_dst_exists(dst_share, img_file, tmp_img_file)
self._discover_file_till_timeout(dst_img_local, timeout=120)
LOG.debug('Copied image %(img)s to tmp file %(tmp)s.'
% {'img': image_id, 'tmp': tmp_img_file})
dst_img_cache_local = os.path.join(dst_dir,
'img-cache-%s' % image_id)
if img_info['disk_format'] == 'raw':
LOG.debug('Image is raw %s.', image_id)
self._clone_file_dst_exists(dst_share, tmp_img_file,
volume['name'], dest_exists=True)
self._move_nfs_file(dst_img_local, dst_img_cache_local)
LOG.debug('Copied raw image %(img)s to volume %(vol)s.'
% {'img': image_id, 'vol': volume['id']})
else:
LOG.debug('Image will be converted to raw %s.', image_id)
img_conv = six.text_type(uuid.uuid4())
dst_img_conv_local = os.path.join(dst_dir, img_conv)
# Checking against the image size, which is an approximate check
self._check_share_can_hold_size(dst_share, img_info['size'])
try:
image_utils.convert_image(dst_img_local,
dst_img_conv_local, 'raw',
run_as_root=run_as_root)
data = image_utils.qemu_img_info(dst_img_conv_local,
run_as_root=run_as_root)
if data.file_format != "raw":
raise exception.InvalidResults(
_("Converted to raw, but format is now %s.")
% data.file_format)
else:
self._clone_file_dst_exists(dst_share, img_conv,
volume['name'],
dest_exists=True)
self._move_nfs_file(dst_img_conv_local,
dst_img_cache_local)
LOG.debug('Copied locally converted raw image'
' %(img)s to volume %(vol)s.'
% {'img': image_id, 'vol': volume['id']})
finally:
if os.path.exists(dst_img_conv_local):
self._delete_file(dst_img_conv_local)
self._post_clone_image(volume)
finally:
if os.path.exists(dst_img_local):
self._delete_file(dst_img_local)<|fim▁end|>
|
pool['netapp_mirrored'] = six.text_type(mirrored).lower()
pool['netapp_unmirrored'] = six.text_type(not mirrored).lower()
|
<|file_name|>prism.js<|end_file_name|><|fim▁begin|>/* http://prismjs.com/download.html?themes=prism&languages=git&plugins=line-numbers */
self = (typeof window !== 'undefined')
? window // if in browser
: (
(typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope)
? self // if in worker
: {} // if in node js
);
/**
* Prism: Lightweight, robust, elegant syntax highlighting
* MIT license http://www.opensource.org/licenses/mit-license.php/
* @author Lea Verou http://lea.verou.me
*/
var Prism = (function(){
// Private helper vars
var lang = /\blang(?:uage)?-(?!\*)(\w+)\b/i;
var _ = self.Prism = {
util: {
encode: function (tokens) {
if (tokens instanceof Token) {
return new Token(tokens.type, _.util.encode(tokens.content), tokens.alias);
} else if (_.util.type(tokens) === 'Array') {
return tokens.map(_.util.encode);
} else {
return tokens.replace(/&/g, '&').replace(/</g, '<').replace(/\u00a0/g, ' ');
}
},
type: function (o) {
return Object.prototype.toString.call(o).match(/\[object (\w+)\]/)[1];
},
// Deep clone a language definition (e.g. to extend it)
clone: function (o) {
var type = _.util.type(o);
switch (type) {
case 'Object':
var clone = {};
for (var key in o) {
if (o.hasOwnProperty(key)) {
clone[key] = _.util.clone(o[key]);
}
}
return clone;
case 'Array':
return o.map(function(v) { return _.util.clone(v); });
}
return o;
}
},
languages: {
extend: function (id, redef) {
var lang = _.util.clone(_.languages[id]);
for (var key in redef) {
lang[key] = redef[key];
}
return lang;
},
/**
* Insert a token before another token in a language literal
* As this needs to recreate the object (we cannot actually insert before keys in object literals),
* we cannot just provide an object, we need an object and a key.
* @param inside The key (or language id) of the parent
* @param before The key to insert before. If not provided, the function appends instead.
* @param insert Object with the key/value pairs to insert
* @param root The object that contains `inside`. If equal to Prism.languages, it can be omitted.
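*
* A minimal usage sketch (the language id and token keys below are
* illustrative, not defined in this file):
*
*     Prism.languages.insertBefore('markup', 'cdata', {
*         'style': { pattern: /<style[\s\S]*?<\/style>/i }
*     });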
*/
insertBefore: function (inside, before, insert, root) {
root = root || _.languages;
var grammar = root[inside];
if (arguments.length == 2) {
insert = arguments[1];
for (var newToken in insert) {
if (insert.hasOwnProperty(newToken)) {
grammar[newToken] = insert[newToken];
}
}
return grammar;
}
var ret = {};
for (var token in grammar) {
if (grammar.hasOwnProperty(token)) {
if (token == before) {
for (var newToken in insert) {
if (insert.hasOwnProperty(newToken)) {
ret[newToken] = insert[newToken];
}
}
}
ret[token] = grammar[token];
}
}
// Update references in other language definitions
_.languages.DFS(_.languages, function(key, value) {
if (value === root[inside] && key != inside) {
this[key] = ret;
}
});
return root[inside] = ret;
},
// Traverse a language definition with Depth First Search
DFS: function(o, callback, type) {
for (var i in o) {
if (o.hasOwnProperty(i)) {
callback.call(o, i, o[i], type || i);
if (_.util.type(o[i]) === 'Object') {
_.languages.DFS(o[i], callback);
}
else if (_.util.type(o[i]) === 'Array') {
_.languages.DFS(o[i], callback, i);
}
}
}
}
},
highlightAll: function(async, callback) {
var elements = document.querySelectorAll('code[class*="language-"], [class*="language-"] code, code[class*="lang-"], [class*="lang-"] code');
for (var i=0, element; element = elements[i++];) {
_.highlightElement(element, async === true, callback);
}
},
highlightElement: function(element, async, callback) {
// Find language
var language, grammar, parent = element;
while (parent && !lang.test(parent.className)) {
parent = parent.parentNode;
}
if (parent) {
language = (parent.className.match(lang) || [,''])[1];
grammar = _.languages[language];
}
if (!grammar) {
return;
}
// Set language on the element, if not present
element.className = element.className.replace(lang, '').replace(/\s+/g, ' ') + ' language-' + language;
// Set language on the parent, for styling
parent = element.parentNode;
if (/pre/i.test(parent.nodeName)) {
parent.className = parent.className.replace(lang, '').replace(/\s+/g, ' ') + ' language-' + language;
}
var code = element.textContent;
if(!code) {
return;
}
code = code.replace(/^(?:\r?\n|\r)/,'');
var env = {
element: element,
language: language,
grammar: grammar,
code: code
};
_.hooks.run('before-highlight', env);
if (async && self.Worker) {
var worker = new Worker(_.filename);
worker.onmessage = function(evt) {
env.highlightedCode = Token.stringify(JSON.parse(evt.data), language);
_.hooks.run('before-insert', env);
env.element.innerHTML = env.highlightedCode;
callback && callback.call(env.element);
_.hooks.run('after-highlight', env);
};
worker.postMessage(JSON.stringify({
language: env.language,
code: env.code
}));
}
else {
env.highlightedCode = _.highlight(env.code, env.grammar, env.language);
_.hooks.run('before-insert', env);
env.element.innerHTML = env.highlightedCode;
callback && callback.call(element);
_.hooks.run('after-highlight', env);
}
},
highlight: function (text, grammar, language) {
var tokens = _.tokenize(text, grammar);
return Token.stringify(_.util.encode(tokens), language);
},
tokenize: function(text, grammar, language) {
var Token = _.Token;
var strarr = [text];
var rest = grammar.rest;
if (rest) {
for (var token in rest) {
grammar[token] = rest[token];
}
delete grammar.rest;
}
tokenloop: for (var token in grammar) {
if(!grammar.hasOwnProperty(token) || !grammar[token]) {
continue;
}
var patterns = grammar[token];
patterns = (_.util.type(patterns) === "Array") ? patterns : [patterns];
for (var j = 0; j < patterns.length; ++j) {
var pattern = patterns[j],
inside = pattern.inside,
lookbehind = !!pattern.lookbehind,
lookbehindLength = 0,
alias = pattern.alias;
pattern = pattern.pattern || pattern;
for (var i=0; i<strarr.length; i++) { // Don’t cache length as it changes during the loop
var str = strarr[i];
if (strarr.length > text.length) {
// Something went terribly wrong, ABORT, ABORT!
break tokenloop;
}
if (str instanceof Token) {
continue;
}
pattern.lastIndex = 0;
var match = pattern.exec(str);
if (match) {
if(lookbehind) {
lookbehindLength = match[1].length;
}
var from = match.index - 1 + lookbehindLength,
match = match[0].slice(lookbehindLength),
len = match.length,
to = from + len,
before = str.slice(0, from + 1),
after = str.slice(to + 1);
var args = [i, 1];
if (before) {
args.push(before);
}
var wrapped = new Token(token, inside? _.tokenize(match, inside) : match, alias);
args.push(wrapped);
<|fim▁hole|> args.push(after);
}
Array.prototype.splice.apply(strarr, args);
}
}
}
}
return strarr;
},
hooks: {
all: {},
add: function (name, callback) {
var hooks = _.hooks.all;
hooks[name] = hooks[name] || [];
hooks[name].push(callback);
},
run: function (name, env) {
var callbacks = _.hooks.all[name];
if (!callbacks || !callbacks.length) {
return;
}
for (var i=0, callback; callback = callbacks[i++];) {
callback(env);
}
}
}
};
var Token = _.Token = function(type, content, alias) {
this.type = type;
this.content = content;
this.alias = alias;
};
Token.stringify = function(o, language, parent) {
if (typeof o == 'string') {
return o;
}
if (_.util.type(o) === 'Array') {
return o.map(function(element) {
return Token.stringify(element, language, o);
}).join('');
}
var env = {
type: o.type,
content: Token.stringify(o.content, language, parent),
tag: 'span',
classes: ['token', o.type],
attributes: {},
language: language,
parent: parent
};
if (env.type == 'comment') {
env.attributes['spellcheck'] = 'true';
}
if (o.alias) {
var aliases = _.util.type(o.alias) === 'Array' ? o.alias : [o.alias];
Array.prototype.push.apply(env.classes, aliases);
}
_.hooks.run('wrap', env);
var attributes = '';
for (var name in env.attributes) {
attributes += name + '="' + (env.attributes[name] || '') + '"';
}
return '<' + env.tag + ' class="' + env.classes.join(' ') + '" ' + attributes + '>' + env.content + '</' + env.tag + '>';
};
if (!self.document) {
if (!self.addEventListener) {
// in Node.js
return self.Prism;
}
// In worker
self.addEventListener('message', function(evt) {
var message = JSON.parse(evt.data),
lang = message.language,
code = message.code;
self.postMessage(JSON.stringify(_.util.encode(_.tokenize(code, _.languages[lang]))));
self.close();
}, false);
return self.Prism;
}
// Get current script and highlight
var script = document.getElementsByTagName('script');
script = script[script.length - 1];
if (script) {
_.filename = script.src;
if (document.addEventListener && !script.hasAttribute('data-manual')) {
document.addEventListener('DOMContentLoaded', _.highlightAll);
}
}
return self.Prism;
})();
if (typeof module !== 'undefined' && module.exports) {
module.exports = Prism;
}
;
Prism.languages.git = {
/*
* A simple one line comment like in a git status command
* For instance:
* $ git status
* # On branch infinite-scroll
* # Your branch and 'origin/sharedBranches/frontendTeam/infinite-scroll' have diverged,
* # and have 1 and 2 different commits each, respectively.
* nothing to commit (working directory clean)
*/
'comment': /^#.*$/m,
/*
* a string (double and simple quote)
*/
'string': /("|')(\\?.)*?\1/m,
/*
* a git command. It starts with a random prompt finishing by a $, then "git" then some other parameters
* For instance:
* $ git add file.txt
*/
'command': {
pattern: /^.*\$ git .*$/m,
inside: {
/*
* A git command can contain a parameter starting by a single or a double dash followed by a string
* For instance:
* $ git diff --cached
* $ git log -p
*/
'parameter': /\s(--|-)\w+/m
}
},
/*
* Coordinates displayed in a git diff command
* For instance:
* $ git diff
* diff --git file.txt file.txt
* index 6214953..1d54a52 100644
* --- file.txt
* +++ file.txt
* @@ -1 +1,2 @@
* -Here's my tetx file
* +Here's my text file
* +And this is the second line
*/
'coord': /^@@.*@@$/m,
/*
* Regexp to match the changed lines in a git diff output. Check the example above.
*/
'deleted': /^-(?!-).+$/m,
'inserted': /^\+(?!\+).+$/m,
/*
* Match a "commit [SHA1]" line in a git log output.
* For instance:
* $ git log
* commit a11a14ef7e26f2ca62d4b35eac455ce636d0dc09
* Author: lgiraudel
* Date: Mon Feb 17 11:18:34 2014 +0100
*
* Add of a new line
*/
'commit_sha1': /^commit \w{40}$/m
};
;
Prism.hooks.add('after-highlight', function (env) {
// works only for <code> wrapped inside <pre data-line-numbers> (not inline)
var pre = env.element.parentNode;
if (!pre || !/pre/i.test(pre.nodeName) || pre.className.indexOf('line-numbers') === -1) {
return;
}
var linesNum = (1 + env.code.split('\n').length);
var lineNumbersWrapper;
var lines = new Array(linesNum);
lines = lines.join('<span></span>');
lineNumbersWrapper = document.createElement('span');
lineNumbersWrapper.className = 'line-numbers-rows';
lineNumbersWrapper.innerHTML = lines;
if (pre.hasAttribute('data-start')) {
pre.style.counterReset = 'linenumber ' + (parseInt(pre.getAttribute('data-start'), 10) - 1);
}
env.element.appendChild(lineNumbersWrapper);
});;<|fim▁end|>
|
if (after) {
|