| id (stringlengths 1–8) | text (stringlengths 6–1.05M) | dataset_id (stringclasses: 1 value) |
|---|---|---|
/flask-logmanager-0.2.11.tar.gz/flask-logmanager-0.2.11/flask_logmanager/swagger-ui/lib/marked.js
|
;(function() {
/**
* Block-Level Grammar
*/
var block = {
newline: /^\n+/,
code: /^( {4}[^\n]+\n*)+/,
fences: noop,
hr: /^( *[-*_]){3,} *(?:\n+|$)/,
heading: /^ *(#{1,6}) *([^\n]+?) *#* *(?:\n+|$)/,
nptable: noop,
lheading: /^([^\n]+)\n *(=|-){2,} *(?:\n+|$)/,
blockquote: /^( *>[^\n]+(\n(?!def)[^\n]+)*\n*)+/,
list: /^( *)(bull) [\s\S]+?(?:hr|def|\n{2,}(?! )(?!\1bull )\n*|\s*$)/,
html: /^ *(?:comment *(?:\n|\s*$)|closed *(?:\n{2,}|\s*$)|closing *(?:\n{2,}|\s*$))/,
def: /^ *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +["(]([^\n]+)[")])? *(?:\n+|$)/,
table: noop,
paragraph: /^((?:[^\n]+\n?(?!hr|heading|lheading|blockquote|tag|def))+)\n*/,
text: /^[^\n]+/
};
block.bullet = /(?:[*+-]|\d+\.)/;
block.item = /^( *)(bull) [^\n]*(?:\n(?!\1bull )[^\n]*)*/;
block.item = replace(block.item, 'gm')
(/bull/g, block.bullet)
();
block.list = replace(block.list)
(/bull/g, block.bullet)
('hr', '\\n+(?=\\1?(?:[-*_] *){3,}(?:\\n+|$))')
('def', '\\n+(?=' + block.def.source + ')')
();
block.blockquote = replace(block.blockquote)
('def', block.def)
();
block._tag = '(?!(?:'
+ 'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code'
+ '|var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo'
+ '|span|br|wbr|ins|del|img)\\b)\\w+(?!:/|[^\\w\\s@]*@)\\b';
block.html = replace(block.html)
('comment', /<!--[\s\S]*?-->/)
('closed', /<(tag)[\s\S]+?<\/\1>/)
('closing', /<tag(?:"[^"]*"|'[^']*'|[^'">])*?>/)
(/tag/g, block._tag)
();
block.paragraph = replace(block.paragraph)
('hr', block.hr)
('heading', block.heading)
('lheading', block.lheading)
('blockquote', block.blockquote)
('tag', '<' + block._tag)
('def', block.def)
();
/**
* Normal Block Grammar
*/
block.normal = merge({}, block);
/**
* GFM Block Grammar
*/
block.gfm = merge({}, block.normal, {
fences: /^ *(`{3,}|~{3,}) *(\S+)? *\n([\s\S]+?)\s*\1 *(?:\n+|$)/,
paragraph: /^/
});
block.gfm.paragraph = replace(block.paragraph)
('(?!', '(?!'
+ block.gfm.fences.source.replace('\\1', '\\2') + '|'
+ block.list.source.replace('\\1', '\\3') + '|')
();
/**
* GFM + Tables Block Grammar
*/
block.tables = merge({}, block.gfm, {
nptable: /^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*/,
table: /^ *\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*/
});
/**
* Block Lexer
*/
function Lexer(options) {
this.tokens = [];
this.tokens.links = {};
this.options = options || marked.defaults;
this.rules = block.normal;
if (this.options.gfm) {
if (this.options.tables) {
this.rules = block.tables;
} else {
this.rules = block.gfm;
}
}
}
/**
* Expose Block Rules
*/
Lexer.rules = block;
/**
* Static Lex Method
*/
Lexer.lex = function(src, options) {
var lexer = new Lexer(options);
return lexer.lex(src);
};
/**
* Preprocessing
*/
Lexer.prototype.lex = function(src) {
src = src
.replace(/\r\n|\r/g, '\n')
.replace(/\t/g, ' ')
.replace(/\u00a0/g, ' ')
.replace(/\u2424/g, '\n');
return this.token(src, true);
};
/**
* Lexing
*/
Lexer.prototype.token = function(src, top, bq) {
var src = src.replace(/^ +$/gm, '')
, next
, loose
, cap
, bull
, b
, item
, space
, i
, l;
while (src) {
// newline
if (cap = this.rules.newline.exec(src)) {
src = src.substring(cap[0].length);
if (cap[0].length > 1) {
this.tokens.push({
type: 'space'
});
}
}
// code
if (cap = this.rules.code.exec(src)) {
src = src.substring(cap[0].length);
cap = cap[0].replace(/^ {4}/gm, '');
this.tokens.push({
type: 'code',
text: !this.options.pedantic
? cap.replace(/\n+$/, '')
: cap
});
continue;
}
// fences (gfm)
if (cap = this.rules.fences.exec(src)) {
src = src.substring(cap[0].length);
this.tokens.push({
type: 'code',
lang: cap[2],
text: cap[3]
});
continue;
}
// heading
if (cap = this.rules.heading.exec(src)) {
src = src.substring(cap[0].length);
this.tokens.push({
type: 'heading',
depth: cap[1].length,
text: cap[2]
});
continue;
}
// table no leading pipe (gfm)
if (top && (cap = this.rules.nptable.exec(src))) {
src = src.substring(cap[0].length);
item = {
type: 'table',
header: cap[1].replace(/^ *| *\| *$/g, '').split(/ *\| */),
align: cap[2].replace(/^ *|\| *$/g, '').split(/ *\| */),
cells: cap[3].replace(/\n$/, '').split('\n')
};
for (i = 0; i < item.align.length; i++) {
if (/^ *-+: *$/.test(item.align[i])) {
item.align[i] = 'right';
} else if (/^ *:-+: *$/.test(item.align[i])) {
item.align[i] = 'center';
} else if (/^ *:-+ *$/.test(item.align[i])) {
item.align[i] = 'left';
} else {
item.align[i] = null;
}
}
for (i = 0; i < item.cells.length; i++) {
item.cells[i] = item.cells[i].split(/ *\| */);
}
this.tokens.push(item);
continue;
}
// lheading
if (cap = this.rules.lheading.exec(src)) {
src = src.substring(cap[0].length);
this.tokens.push({
type: 'heading',
depth: cap[2] === '=' ? 1 : 2,
text: cap[1]
});
continue;
}
// hr
if (cap = this.rules.hr.exec(src)) {
src = src.substring(cap[0].length);
this.tokens.push({
type: 'hr'
});
continue;
}
// blockquote
if (cap = this.rules.blockquote.exec(src)) {
src = src.substring(cap[0].length);
this.tokens.push({
type: 'blockquote_start'
});
cap = cap[0].replace(/^ *> ?/gm, '');
// Pass `top` to keep the current
// "toplevel" state. This is exactly
// how markdown.pl works.
this.token(cap, top, true);
this.tokens.push({
type: 'blockquote_end'
});
continue;
}
// list
if (cap = this.rules.list.exec(src)) {
src = src.substring(cap[0].length);
bull = cap[2];
this.tokens.push({
type: 'list_start',
ordered: bull.length > 1
});
// Get each top-level item.
cap = cap[0].match(this.rules.item);
next = false;
l = cap.length;
i = 0;
for (; i < l; i++) {
item = cap[i];
// Remove the list item's bullet
// so it is seen as the next token.
space = item.length;
item = item.replace(/^ *([*+-]|\d+\.) +/, '');
// Outdent whatever the
// list item contains. Hacky.
if (~item.indexOf('\n ')) {
space -= item.length;
item = !this.options.pedantic
? item.replace(new RegExp('^ {1,' + space + '}', 'gm'), '')
: item.replace(/^ {1,4}/gm, '');
}
// Determine whether the next list item belongs here.
// Backpedal if it does not belong in this list.
if (this.options.smartLists && i !== l - 1) {
b = block.bullet.exec(cap[i + 1])[0];
if (bull !== b && !(bull.length > 1 && b.length > 1)) {
src = cap.slice(i + 1).join('\n') + src;
i = l - 1;
}
}
// Determine whether item is loose or not.
// Use: /(^|\n)(?! )[^\n]+\n\n(?!\s*$)/
// for discount behavior.
loose = next || /\n\n(?!\s*$)/.test(item);
if (i !== l - 1) {
next = item.charAt(item.length - 1) === '\n';
if (!loose) loose = next;
}
this.tokens.push({
type: loose
? 'loose_item_start'
: 'list_item_start'
});
// Recurse.
this.token(item, false, bq);
this.tokens.push({
type: 'list_item_end'
});
}
this.tokens.push({
type: 'list_end'
});
continue;
}
// html
if (cap = this.rules.html.exec(src)) {
src = src.substring(cap[0].length);
this.tokens.push({
type: this.options.sanitize
? 'paragraph'
: 'html',
pre: cap[1] === 'pre' || cap[1] === 'script' || cap[1] === 'style',
text: cap[0]
});
continue;
}
// def
if ((!bq && top) && (cap = this.rules.def.exec(src))) {
src = src.substring(cap[0].length);
this.tokens.links[cap[1].toLowerCase()] = {
href: cap[2],
title: cap[3]
};
continue;
}
// table (gfm)
if (top && (cap = this.rules.table.exec(src))) {
src = src.substring(cap[0].length);
item = {
type: 'table',
header: cap[1].replace(/^ *| *\| *$/g, '').split(/ *\| */),
align: cap[2].replace(/^ *|\| *$/g, '').split(/ *\| */),
cells: cap[3].replace(/(?: *\| *)?\n$/, '').split('\n')
};
for (i = 0; i < item.align.length; i++) {
if (/^ *-+: *$/.test(item.align[i])) {
item.align[i] = 'right';
} else if (/^ *:-+: *$/.test(item.align[i])) {
item.align[i] = 'center';
} else if (/^ *:-+ *$/.test(item.align[i])) {
item.align[i] = 'left';
} else {
item.align[i] = null;
}
}
for (i = 0; i < item.cells.length; i++) {
item.cells[i] = item.cells[i]
.replace(/^ *\| *| *\| *$/g, '')
.split(/ *\| */);
}
this.tokens.push(item);
continue;
}
// top-level paragraph
if (top && (cap = this.rules.paragraph.exec(src))) {
src = src.substring(cap[0].length);
this.tokens.push({
type: 'paragraph',
text: cap[1].charAt(cap[1].length - 1) === '\n'
? cap[1].slice(0, -1)
: cap[1]
});
continue;
}
// text
if (cap = this.rules.text.exec(src)) {
// Top-level should never reach here.
src = src.substring(cap[0].length);
this.tokens.push({
type: 'text',
text: cap[0]
});
continue;
}
if (src) {
throw new
Error('Infinite loop on byte: ' + src.charCodeAt(0));
}
}
return this.tokens;
};
/**
* Inline-Level Grammar
*/
var inline = {
escape: /^\\([\\`*{}\[\]()#+\-.!_>])/,
autolink: /^<([^ >]+(@|:\/)[^ >]+)>/,
url: noop,
tag: /^<!--[\s\S]*?-->|^<\/?\w+(?:"[^"]*"|'[^']*'|[^'">])*?>/,
link: /^!?\[(inside)\]\(href\)/,
reflink: /^!?\[(inside)\]\s*\[([^\]]*)\]/,
nolink: /^!?\[((?:\[[^\]]*\]|[^\[\]])*)\]/,
strong: /^__([\s\S]+?)__(?!_)|^\*\*([\s\S]+?)\*\*(?!\*)/,
em: /^\b_((?:__|[\s\S])+?)_\b|^\*((?:\*\*|[\s\S])+?)\*(?!\*)/,
code: /^(`+)\s*([\s\S]*?[^`])\s*\1(?!`)/,
br: /^ {2,}\n(?!\s*$)/,
del: noop,
text: /^[\s\S]+?(?=[\\<!\[_*`]| {2,}\n|$)/
};
inline._inside = /(?:\[[^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*/;
inline._href = /\s*<?([\s\S]*?)>?(?:\s+['"]([\s\S]*?)['"])?\s*/;
inline.link = replace(inline.link)
('inside', inline._inside)
('href', inline._href)
();
inline.reflink = replace(inline.reflink)
('inside', inline._inside)
();
/**
* Normal Inline Grammar
*/
inline.normal = merge({}, inline);
/**
* Pedantic Inline Grammar
*/
inline.pedantic = merge({}, inline.normal, {
strong: /^__(?=\S)([\s\S]*?\S)__(?!_)|^\*\*(?=\S)([\s\S]*?\S)\*\*(?!\*)/,
em: /^_(?=\S)([\s\S]*?\S)_(?!_)|^\*(?=\S)([\s\S]*?\S)\*(?!\*)/
});
/**
* GFM Inline Grammar
*/
inline.gfm = merge({}, inline.normal, {
escape: replace(inline.escape)('])', '~|])')(),
url: /^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])/,
del: /^~~(?=\S)([\s\S]*?\S)~~/,
text: replace(inline.text)
(']|', '~]|')
('|', '|https?://|')
()
});
/**
* GFM + Line Breaks Inline Grammar
*/
inline.breaks = merge({}, inline.gfm, {
br: replace(inline.br)('{2,}', '*')(),
text: replace(inline.gfm.text)('{2,}', '*')()
});
/**
* Inline Lexer & Compiler
*/
function InlineLexer(links, options) {
this.options = options || marked.defaults;
this.links = links;
this.rules = inline.normal;
this.renderer = this.options.renderer || new Renderer;
this.renderer.options = this.options;
if (!this.links) {
throw new
Error('Tokens array requires a `links` property.');
}
if (this.options.gfm) {
if (this.options.breaks) {
this.rules = inline.breaks;
} else {
this.rules = inline.gfm;
}
} else if (this.options.pedantic) {
this.rules = inline.pedantic;
}
}
/**
* Expose Inline Rules
*/
InlineLexer.rules = inline;
/**
* Static Lexing/Compiling Method
*/
InlineLexer.output = function(src, links, options) {
var inline = new InlineLexer(links, options);
return inline.output(src);
};
/**
* Lexing/Compiling
*/
InlineLexer.prototype.output = function(src) {
var out = ''
, link
, text
, href
, cap;
while (src) {
// escape
if (cap = this.rules.escape.exec(src)) {
src = src.substring(cap[0].length);
out += cap[1];
continue;
}
// autolink
if (cap = this.rules.autolink.exec(src)) {
src = src.substring(cap[0].length);
if (cap[2] === '@') {
text = cap[1].charAt(6) === ':'
? this.mangle(cap[1].substring(7))
: this.mangle(cap[1]);
href = this.mangle('mailto:') + text;
} else {
text = escape(cap[1]);
href = text;
}
out += this.renderer.link(href, null, text);
continue;
}
// url (gfm)
if (!this.inLink && (cap = this.rules.url.exec(src))) {
src = src.substring(cap[0].length);
text = escape(cap[1]);
href = text;
out += this.renderer.link(href, null, text);
continue;
}
// tag
if (cap = this.rules.tag.exec(src)) {
if (!this.inLink && /^<a /i.test(cap[0])) {
this.inLink = true;
} else if (this.inLink && /^<\/a>/i.test(cap[0])) {
this.inLink = false;
}
src = src.substring(cap[0].length);
out += this.options.sanitize
? escape(cap[0])
: cap[0];
continue;
}
// link
if (cap = this.rules.link.exec(src)) {
src = src.substring(cap[0].length);
this.inLink = true;
out += this.outputLink(cap, {
href: cap[2],
title: cap[3]
});
this.inLink = false;
continue;
}
// reflink, nolink
if ((cap = this.rules.reflink.exec(src))
|| (cap = this.rules.nolink.exec(src))) {
src = src.substring(cap[0].length);
link = (cap[2] || cap[1]).replace(/\s+/g, ' ');
link = this.links[link.toLowerCase()];
if (!link || !link.href) {
out += cap[0].charAt(0);
src = cap[0].substring(1) + src;
continue;
}
this.inLink = true;
out += this.outputLink(cap, link);
this.inLink = false;
continue;
}
// strong
if (cap = this.rules.strong.exec(src)) {
src = src.substring(cap[0].length);
out += this.renderer.strong(this.output(cap[2] || cap[1]));
continue;
}
// em
if (cap = this.rules.em.exec(src)) {
src = src.substring(cap[0].length);
out += this.renderer.em(this.output(cap[2] || cap[1]));
continue;
}
// code
if (cap = this.rules.code.exec(src)) {
src = src.substring(cap[0].length);
out += this.renderer.codespan(escape(cap[2], true));
continue;
}
// br
if (cap = this.rules.br.exec(src)) {
src = src.substring(cap[0].length);
out += this.renderer.br();
continue;
}
// del (gfm)
if (cap = this.rules.del.exec(src)) {
src = src.substring(cap[0].length);
out += this.renderer.del(this.output(cap[1]));
continue;
}
// text
if (cap = this.rules.text.exec(src)) {
src = src.substring(cap[0].length);
out += escape(this.smartypants(cap[0]));
continue;
}
if (src) {
throw new
Error('Infinite loop on byte: ' + src.charCodeAt(0));
}
}
return out;
};
/**
* Compile Link
*/
InlineLexer.prototype.outputLink = function(cap, link) {
var href = escape(link.href)
, title = link.title ? escape(link.title) : null;
return cap[0].charAt(0) !== '!'
? this.renderer.link(href, title, this.output(cap[1]))
: this.renderer.image(href, title, escape(cap[1]));
};
/**
* Smartypants Transformations
*/
InlineLexer.prototype.smartypants = function(text) {
if (!this.options.smartypants) return text;
return text
// em-dashes
.replace(/--/g, '\u2014')
// opening singles
.replace(/(^|[-\u2014/(\[{"\s])'/g, '$1\u2018')
// closing singles & apostrophes
.replace(/'/g, '\u2019')
// opening doubles
.replace(/(^|[-\u2014/(\[{\u2018\s])"/g, '$1\u201c')
// closing doubles
.replace(/"/g, '\u201d')
// ellipses
.replace(/\.{3}/g, '\u2026');
};
/**
* Mangle Links
*/
InlineLexer.prototype.mangle = function(text) {
var out = ''
, l = text.length
, i = 0
, ch;
for (; i < l; i++) {
ch = text.charCodeAt(i);
if (Math.random() > 0.5) {
ch = 'x' + ch.toString(16);
}
out += '&#' + ch + ';';
}
return out;
};
/**
* Renderer
*/
function Renderer(options) {
this.options = options || {};
}
Renderer.prototype.code = function(code, lang, escaped) {
if (this.options.highlight) {
var out = this.options.highlight(code, lang);
if (out != null && out !== code) {
escaped = true;
code = out;
}
}
if (!lang) {
return '<pre><code>'
+ (escaped ? code : escape(code, true))
+ '\n</code></pre>';
}
return '<pre><code class="'
+ this.options.langPrefix
+ escape(lang, true)
+ '">'
+ (escaped ? code : escape(code, true))
+ '\n</code></pre>\n';
};
Renderer.prototype.blockquote = function(quote) {
return '<blockquote>\n' + quote + '</blockquote>\n';
};
Renderer.prototype.html = function(html) {
return html;
};
Renderer.prototype.heading = function(text, level, raw) {
return '<h'
+ level
+ ' id="'
+ this.options.headerPrefix
+ raw.toLowerCase().replace(/[^\w]+/g, '-')
+ '">'
+ text
+ '</h'
+ level
+ '>\n';
};
Renderer.prototype.hr = function() {
return this.options.xhtml ? '<hr/>\n' : '<hr>\n';
};
Renderer.prototype.list = function(body, ordered) {
var type = ordered ? 'ol' : 'ul';
return '<' + type + '>\n' + body + '</' + type + '>\n';
};
Renderer.prototype.listitem = function(text) {
return '<li>' + text + '</li>\n';
};
Renderer.prototype.paragraph = function(text) {
return '<p>' + text + '</p>\n';
};
Renderer.prototype.table = function(header, body) {
return '<table>\n'
+ '<thead>\n'
+ header
+ '</thead>\n'
+ '<tbody>\n'
+ body
+ '</tbody>\n'
+ '</table>\n';
};
Renderer.prototype.tablerow = function(content) {
return '<tr>\n' + content + '</tr>\n';
};
Renderer.prototype.tablecell = function(content, flags) {
var type = flags.header ? 'th' : 'td';
var tag = flags.align
? '<' + type + ' style="text-align:' + flags.align + '">'
: '<' + type + '>';
return tag + content + '</' + type + '>\n';
};
// span level renderer
Renderer.prototype.strong = function(text) {
return '<strong>' + text + '</strong>';
};
Renderer.prototype.em = function(text) {
return '<em>' + text + '</em>';
};
Renderer.prototype.codespan = function(text) {
return '<code>' + text + '</code>';
};
Renderer.prototype.br = function() {
return this.options.xhtml ? '<br/>' : '<br>';
};
Renderer.prototype.del = function(text) {
return '<del>' + text + '</del>';
};
Renderer.prototype.link = function(href, title, text) {
if (this.options.sanitize) {
try {
var prot = decodeURIComponent(unescape(href))
.replace(/[^\w:]/g, '')
.toLowerCase();
} catch (e) {
return '';
}
if (prot.indexOf('javascript:') === 0) {
return '';
}
}
var out = '<a href="' + href + '"';
if (title) {
out += ' title="' + title + '"';
}
out += '>' + text + '</a>';
return out;
};
Renderer.prototype.image = function(href, title, text) {
var out = '<img src="' + href + '" alt="' + text + '"';
if (title) {
out += ' title="' + title + '"';
}
out += this.options.xhtml ? '/>' : '>';
return out;
};
/**
* Parsing & Compiling
*/
function Parser(options) {
this.tokens = [];
this.token = null;
this.options = options || marked.defaults;
this.options.renderer = this.options.renderer || new Renderer;
this.renderer = this.options.renderer;
this.renderer.options = this.options;
}
/**
* Static Parse Method
*/
Parser.parse = function(src, options, renderer) {
var parser = new Parser(options, renderer);
return parser.parse(src);
};
/**
* Parse Loop
*/
Parser.prototype.parse = function(src) {
this.inline = new InlineLexer(src.links, this.options, this.renderer);
this.tokens = src.reverse();
var out = '';
while (this.next()) {
out += this.tok();
}
return out;
};
/**
* Next Token
*/
Parser.prototype.next = function() {
return this.token = this.tokens.pop();
};
/**
* Preview Next Token
*/
Parser.prototype.peek = function() {
return this.tokens[this.tokens.length - 1] || 0;
};
/**
* Parse Text Tokens
*/
Parser.prototype.parseText = function() {
var body = this.token.text;
while (this.peek().type === 'text') {
body += '\n' + this.next().text;
}
return this.inline.output(body);
};
/**
* Parse Current Token
*/
Parser.prototype.tok = function() {
switch (this.token.type) {
case 'space': {
return '';
}
case 'hr': {
return this.renderer.hr();
}
case 'heading': {
return this.renderer.heading(
this.inline.output(this.token.text),
this.token.depth,
this.token.text);
}
case 'code': {
return this.renderer.code(this.token.text,
this.token.lang,
this.token.escaped);
}
case 'table': {
var header = ''
, body = ''
, i
, row
, cell
, flags
, j;
// header
cell = '';
for (i = 0; i < this.token.header.length; i++) {
flags = { header: true, align: this.token.align[i] };
cell += this.renderer.tablecell(
this.inline.output(this.token.header[i]),
{ header: true, align: this.token.align[i] }
);
}
header += this.renderer.tablerow(cell);
for (i = 0; i < this.token.cells.length; i++) {
row = this.token.cells[i];
cell = '';
for (j = 0; j < row.length; j++) {
cell += this.renderer.tablecell(
this.inline.output(row[j]),
{ header: false, align: this.token.align[j] }
);
}
body += this.renderer.tablerow(cell);
}
return this.renderer.table(header, body);
}
case 'blockquote_start': {
var body = '';
while (this.next().type !== 'blockquote_end') {
body += this.tok();
}
return this.renderer.blockquote(body);
}
case 'list_start': {
var body = ''
, ordered = this.token.ordered;
while (this.next().type !== 'list_end') {
body += this.tok();
}
return this.renderer.list(body, ordered);
}
case 'list_item_start': {
var body = '';
while (this.next().type !== 'list_item_end') {
body += this.token.type === 'text'
? this.parseText()
: this.tok();
}
return this.renderer.listitem(body);
}
case 'loose_item_start': {
var body = '';
while (this.next().type !== 'list_item_end') {
body += this.tok();
}
return this.renderer.listitem(body);
}
case 'html': {
var html = !this.token.pre && !this.options.pedantic
? this.inline.output(this.token.text)
: this.token.text;
return this.renderer.html(html);
}
case 'paragraph': {
return this.renderer.paragraph(this.inline.output(this.token.text));
}
case 'text': {
return this.renderer.paragraph(this.parseText());
}
}
};
/**
* Helpers
*/
function escape(html, encode) {
return html
.replace(!encode ? /&(?!#?\w+;)/g : /&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;')
.replace(/"/g, '&quot;')
.replace(/'/g, '&#39;');
}
function unescape(html) {
return html.replace(/&([#\w]+);/g, function(_, n) {
n = n.toLowerCase();
if (n === 'colon') return ':';
if (n.charAt(0) === '#') {
return n.charAt(1) === 'x'
? String.fromCharCode(parseInt(n.substring(2), 16))
: String.fromCharCode(+n.substring(1));
}
return '';
});
}
function replace(regex, opt) {
regex = regex.source;
opt = opt || '';
return function self(name, val) {
if (!name) return new RegExp(regex, opt);
val = val.source || val;
val = val.replace(/(^|[^\[])\^/g, '$1');
regex = regex.replace(name, val);
return self;
};
}
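// Editor's note: `replace` above returns a chainable substitution function;
// e.g. `block.item = replace(block.item, 'gm')(/bull/g, block.bullet)();`
// swaps each named placeholder for a sub-pattern, and the final no-argument
// call compiles and returns the finished RegExp.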
function noop() {}
noop.exec = noop;
function merge(obj) {
var i = 1
, target
, key;
for (; i < arguments.length; i++) {
target = arguments[i];
for (key in target) {
if (Object.prototype.hasOwnProperty.call(target, key)) {
obj[key] = target[key];
}
}
}
return obj;
}
/**
* Marked
*/
function marked(src, opt, callback) {
if (callback || typeof opt === 'function') {
if (!callback) {
callback = opt;
opt = null;
}
opt = merge({}, marked.defaults, opt || {});
var highlight = opt.highlight
, tokens
, pending
, i = 0;
try {
tokens = Lexer.lex(src, opt)
} catch (e) {
return callback(e);
}
pending = tokens.length;
var done = function(err) {
if (err) {
opt.highlight = highlight;
return callback(err);
}
var out;
try {
out = Parser.parse(tokens, opt);
} catch (e) {
err = e;
}
opt.highlight = highlight;
return err
? callback(err)
: callback(null, out);
};
if (!highlight || highlight.length < 3) {
return done();
}
delete opt.highlight;
if (!pending) return done();
for (; i < tokens.length; i++) {
(function(token) {
if (token.type !== 'code') {
return --pending || done();
}
return highlight(token.text, token.lang, function(err, code) {
if (err) return done(err);
if (code == null || code === token.text) {
return --pending || done();
}
token.text = code;
token.escaped = true;
--pending || done();
});
})(tokens[i]);
}
return;
}
try {
if (opt) opt = merge({}, marked.defaults, opt);
return Parser.parse(Lexer.lex(src, opt), opt);
} catch (e) {
e.message += '\nPlease report this to https://github.com/chjj/marked.';
if ((opt || marked.defaults).silent) {
return '<p>An error occurred:</p><pre>'
+ escape(e.message + '', true)
+ '</pre>';
}
throw e;
}
}
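// Editor's sketch of typical calls into the `marked` function above
// (input strings are illustrative):
//   var html = marked('I am using __markdown__.');        // synchronous, returns HTML
//   marked.setOptions({ gfm: true, breaks: false });      // tweak the defaults
//   marked(src, opts, function(err, html) { /* ... */ }); // callback form (async highlighting)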
/**
* Options
*/
marked.options =
marked.setOptions = function(opt) {
merge(marked.defaults, opt);
return marked;
};
marked.defaults = {
gfm: true,
tables: true,
breaks: false,
pedantic: false,
sanitize: false,
smartLists: false,
silent: false,
highlight: null,
langPrefix: 'lang-',
smartypants: false,
headerPrefix: '',
renderer: new Renderer,
xhtml: false
};
/**
* Expose
*/
marked.Parser = Parser;
marked.parser = Parser.parse;
marked.Renderer = Renderer;
marked.Lexer = Lexer;
marked.lexer = Lexer.lex;
marked.InlineLexer = InlineLexer;
marked.inlineLexer = InlineLexer.output;
marked.parse = marked;
if (typeof module !== 'undefined' && typeof exports === 'object') {
module.exports = marked;
} else if (typeof define === 'function' && define.amd) {
define(function() { return marked; });
} else {
this.marked = marked;
}
}).call(function() {
return this || (typeof window !== 'undefined' ? window : global);
}());
|
PypiClean
|
/defichainTest-0.0.1b1-py3-none-any.whl/defichain/transactions/builder/rawtransactionbuilder.py
|
from defichain import Account
from defichain.exceptions.transactions import TxBuilderError, NotYetSupportedError
from defichain.transactions.address import Address
from defichain.transactions.constants import AddressTypes
from defichain.networks import Network
from defichain.transactions.remotedata.remotedata import RemoteData
from defichain.transactions.rawtransactions import Transaction, TxP2WPKHInput, TxP2SHInput, TxAddressOutput, \
TxDefiOutput, estimate_fee
from defichain.transactions.defitx.modules.basedefitx import BaseDefiTx
class RawTransactionBuilder:
@staticmethod
def new_transaction() -> Transaction:
return Transaction([], [])
def __init__(self, address: str, account: Account, dataSource: RemoteData, feePerByte: float):
self._address, self._account, self._dataSource, self._feePerByte = None, None, None, None
self.set_address(address)
self.set_account(account)
self.set_dataSource(dataSource)
self.set_feePerByte(feePerByte)
# Build Transaction
def build_transactionInputs(self, inputs=[]) -> Transaction:
tx = self.new_transaction()
if inputs or self.get_dataSource() is None:
tx.set_inputs(inputs)
else:
for input in self.get_dataSource().get_unspent(self.get_address()):
address = Address.from_scriptPublicKey(self.get_account().get_network(), input["scriptPubKey"])
# Build P2PKH Input
if address.get_addressType() == AddressTypes.P2PKH:
raise NotYetSupportedError()
# Build P2SH Input
elif address.get_addressType() == AddressTypes.P2SH:
tx.add_input(TxP2SHInput(input["txid"], input["vout"], self.get_account().get_p2wpkh(),
input["value"]))
# build P2WPKH Input
elif address.get_addressType() == AddressTypes.P2WPKH:
tx.add_input(TxP2WPKHInput(input["txid"], input["vout"], self.get_address(), input["value"]))
return tx
def build_defiTx(self, value: int, defiTx: BaseDefiTx, inputs=[]) -> Transaction:
tx = self.build_transactionInputs(inputs)
defitx_output = TxDefiOutput(value, defiTx)
if tx.get_inputsValue() - value < 0:
raise TxBuilderError("The value of the output is bigger then the value of the input")
change_output = TxAddressOutput(tx.get_inputsValue() - value, self.get_address())
tx.add_output(defitx_output)
tx.add_output(change_output)
# Calculate fee
fee = estimate_fee(tx, self.get_feePerByte())
# Subtract fee from output
value = tx.get_outputs()[1].get_value() - fee
if value < 0:
raise TxBuilderError("The used address has not enough UTXO to pay the transaction fee")
tx.get_outputs()[1].set_value(value)
# Sign and Return
self.sign(tx)
return tx
def sign(self, tx: Transaction) -> None:
tx.sign(self.get_account().get_network(), [self.get_account().get_privateKey()])
# Get Information
def get_address(self) -> str:
return self._address
def get_addressType(self) -> str:
return Address.from_address(self.get_address()).get_addressType()
def get_account(self) -> Account:
return self._account
def get_dataSource(self) -> "RemoteData":
return self._dataSource
def get_feePerByte(self) -> float:
return self._feePerByte
def get_network(self) -> Network:
return self.get_account().get_network()
# Set Information
def set_address(self, address: str) -> None:
self._address = address
def set_account(self, account: Account) -> None:
self._account = account
def set_dataSource(self, dataSource: RemoteData) -> None:
self._dataSource = dataSource
def set_feePerByte(self, feePerByte: float) -> None:
self._feePerByte = feePerByte
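# Editor's illustration of the fee handling in build_defiTx above (the numbers
# are made up for the example):
#   inputs value             : 100_000_000   # sum of the selected UTXOs
#   defitx output value      :           0
#   change output before fee : 100_000_000   # inputs value - defitx value
#   estimated fee            :         250   # estimate_fee(tx, feePerByte)
#   change output after fee  :  99_999_750   # written back via set_value()
# If the change would become negative, a TxBuilderError is raised instead.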
|
PypiClean
|
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flasharray/FA_2_20/models/offload_google_cloud.py
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_20 import models
class OffloadGoogleCloud(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'access_key_id': 'str',
'bucket': 'str',
'secret_access_key': 'str'
}
attribute_map = {
'access_key_id': 'access_key_id',
'bucket': 'bucket',
'secret_access_key': 'secret_access_key'
}
required_args = {
}
def __init__(
self,
access_key_id=None, # type: str
bucket=None, # type: str
secret_access_key=None, # type: str
):
"""
Keyword args:
access_key_id (str): The access key ID of the Google Cloud account used to create a connection between the array and a Google Cloud offload target. The access key ID is 24 characters in length and is only accepted when creating the connection between the array and the Google Cloud offload target. The `access_key_id`, `secret_access_key`, and `bucket` parameters must be set together.
bucket (str): The name of the Google Cloud Storage bucket to which the data will be offloaded. Grant basic read and write access permissions to the bucket and verify that the bucket is empty of all objects. The `access_key_id`, `secret_access_key`, and `bucket` parameters must be set together.
secret_access_key (str): The secret access key that goes with the access key ID of the Google Cloud account. The secret access key is 40 characters in length and is only accepted when creating the connection between the array and the Google Cloud offload target. The `access_key_id`, `secret_access_key`, and `bucket` parameters must be set together.
"""
if access_key_id is not None:
self.access_key_id = access_key_id
if bucket is not None:
self.bucket = bucket
if secret_access_key is not None:
self.secret_access_key = secret_access_key
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `OffloadGoogleCloud`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `OffloadGoogleCloud`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `OffloadGoogleCloud`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `OffloadGoogleCloud`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(OffloadGoogleCloud, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OffloadGoogleCloud):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
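# Editor's note: a minimal usage sketch of the model defined above; the key and
# bucket values are placeholders of the documented lengths, not real credentials.
if __name__ == "__main__":
    example = OffloadGoogleCloud(
        access_key_id="GOOGEXAMPLEACCESSKEY0000",                      # 24 characters
        bucket="example-offload-bucket",
        secret_access_key="0123456789abcdef0123456789abcdef01234567",  # 40 characters
    )
    print(example.to_dict())
    # {'access_key_id': 'GOOGEXAMPLEACCESSKEY0000', 'bucket': 'example-offload-bucket', ...}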
|
PypiClean
|
/universal_pathlib_edge-0.0.13.tar.gz/universal_pathlib_edge-0.0.13/upath/core.py
|
import os
import pathlib
import re
import urllib
from abc import ABCMeta
from fsspec.registry import (
get_filesystem_class,
known_implementations,
registry,
)
from fsspec.utils import stringify_path
from upath.errors import NotDirectoryError
class _FSSpecAccessor:
def __init__(self, parsed_url, *args, **kwargs):
self._url = parsed_url
cls = get_filesystem_class(self._url.scheme)
url_kwargs = cls._get_kwargs_from_urls(
urllib.parse.urlunparse(self._url)
)
url_kwargs.update(kwargs)
self._fs = cls(**url_kwargs)
def transform_args_wrapper(self, func):
"""Modifies the arguments that get passed to the filesystem so that
the UPath instance gets stripped as the first argument. If a
path keyword argument is not given, then `UPath.path` is
formatted for the filesystem and inserted as the first argument.
If it is, then the path keyword argument is formatted properly for
the filesystem.
"""
def wrapper(*args, **kwargs):
args, kwargs = self._transform_arg_paths(args, kwargs)
return func(*args, **kwargs)
return wrapper
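# Editor's illustration (hypothetical call): for something like
#   accessor.listdir(UPath("s3://bucket/data"))
# the wrapper pops the UPath instance, formats its `.path` ("/data") through
# `_format_path`, and invokes the underlying fsspec method as
#   fs.listdir("/data")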
def _transform_arg_paths(self, args, kwargs):
"""Formats the path properly for the filesystem backend."""
args = list(args)
first_arg = args.pop(0)
if not kwargs.get("path"):
if isinstance(first_arg, UPath):
first_arg = self._format_path(first_arg.path)
args.insert(0, first_arg)
args = tuple(args)
else:
kwargs["path"] = self._format_path(kwargs["path"])
return args, kwargs
def _format_path(self, s):
"""Placeholder method for subclassed filesystems"""
return s
def __getattribute__(self, item):
class_attrs = ["_url", "_fs", "__class__"]
if item in class_attrs:
return super().__getattribute__(item)
class_methods = [
"__init__",
"__getattribute__",
"transform_args_wrapper",
"_transform_arg_paths",
"_format_path",
]
if item in class_methods:
return lambda *args, **kwargs: getattr(self.__class__, item)(
self, *args, **kwargs
)
d = object.__getattribute__(self, "__dict__")
fs = d.get("_fs", None)
if fs is not None:
method = getattr(fs, item, None)
if method:
return lambda *args, **kwargs: (
self.transform_args_wrapper(method)(*args, **kwargs)
) # noqa: E501
else:
raise NotImplementedError(
f"{fs.protocol} filesystem has no attribute {item}"
)
class PureUPath(pathlib.PurePath):
_flavour = pathlib._posix_flavour
__slots__ = ()
class UPathMeta(ABCMeta):
def __instancecheck__(cls, instance):
return isinstance(instance, pathlib.Path)
def __subclasscheck__(cls, subclass):
return issubclass(subclass, pathlib.Path)
class UPath(pathlib.Path, PureUPath, metaclass=UPathMeta):
__slots__ = ("_url", "_kwargs", "_closed", "fs")
not_implemented = [
"cwd",
"home",
"expanduser",
"group",
"is_mount",
"is_symlink",
"is_socket",
"is_fifo",
"is_block_device",
"is_char_device",
"lchmod",
"lstat",
"owner",
"readlink",
]
_default_accessor = _FSSpecAccessor
def __new__(cls, *args, **kwargs):
if issubclass(cls, UPath):
args_list = list(args)
url = args_list.pop(0)
url = stringify_path(url)
parsed_url = urllib.parse.urlparse(url)
for key in ["scheme", "netloc"]:
val = kwargs.get(key)
if val:
parsed_url = parsed_url._replace(**{key: val})
# treat as local filesystem, return PosixPath or WindowsPath
impls = list(registry) + list(known_implementations.keys())
if not parsed_url.scheme or parsed_url.scheme not in impls:
cls = (
pathlib.WindowsPath
if os.name == "nt"
else pathlib.PosixPath
)
self = cls._from_parts(args, init=False)
if not self._flavour.is_supported:
raise NotImplementedError(
"cannot instantiate %r on your system" % (cls.__name__,)
)
self._init()
else:
import upath.registry
cls = upath.registry._registry[parsed_url.scheme]
kwargs["_url"] = parsed_url
args_list.insert(0, parsed_url.path)
args = tuple(args_list)
self = cls._from_parts_init(args, init=False)
self._init(*args, **kwargs)
else:
self = super().__new__(*args, **kwargs)
return self
def _init(self, *args, template=None, **kwargs):
self._closed = False
if not kwargs:
kwargs = dict(**self._kwargs)
else:
self._kwargs = dict(**kwargs)
self._url = kwargs.pop("_url") if kwargs.get("_url") else None
if not self._root:
if not self._parts:
self._root = "/"
elif self._parts[0] == "/":
self._root = self._parts.pop(0)
if getattr(self, "_str", None):
delattr(self, "_str")
if template is not None:
self._accessor = template._accessor
else:
self._accessor = self._default_accessor(self._url, *args, **kwargs)
self.fs = self._accessor._fs
def __getattribute__(self, item):
if item == "__class__":
return super().__getattribute__("__class__")
if item in getattr(self.__class__, "not_implemented"):
raise NotImplementedError(f"UPath has no attribute {item}")
else:
return super().__getattribute__(item)
def _format_parsed_parts(self, drv, root, parts):
if parts:
join_parts = parts[1:] if parts[0] == "/" else parts
else:
join_parts = []
if drv or root:
path = drv + root + self._flavour.join(join_parts)
else:
path = self._flavour.join(join_parts)
scheme, netloc = self._url.scheme, self._url.netloc
scheme = scheme + ":"
netloc = "//" + netloc if netloc else ""
formatted = scheme + netloc + path
return formatted
@property
def path(self):
if self._parts:
join_parts = (
self._parts[1:] if self._parts[0] == "/" else self._parts
)
path = self._flavour.join(join_parts)
return self._root + path
else:
return "/"
def open(self, *args, **kwargs):
return self._accessor.open(self, *args, **kwargs)
def iterdir(self):
"""Iterate over the files in this directory. Does not yield any
result for the special paths '.' and '..'.
"""
if self._closed:
self._raise_closed()
for name in self._accessor.listdir(self):
# fsspec returns dictionaries
if isinstance(name, dict):
name = name.get("name")
if name in {".", ".."}:
# Yielding a path object for these makes little sense
continue
# only want the path name with iterdir
name = self._sub_path(name)
yield self._make_child_relpath(name)
if self._closed:
self._raise_closed()
def glob(self, pattern):
path = self.joinpath(pattern)
for name in self._accessor.glob(self, path=path.path):
name = self._sub_path(name)
name = name.split(self._flavour.sep)
yield self._make_child(name)
def _sub_path(self, name):
# only want the path name with iterdir
sp = self.path
return re.sub(f"^({sp}|{sp[1:]})/", "", name)
def exists(self):
"""
Whether this path exists.
"""
if not getattr(self._accessor, "exists"):
try:
self._accessor.stat(self)
except (FileNotFoundError):
return False
return True
else:
return self._accessor.exists(self)
def is_dir(self):
info = self._accessor.info(self)
if info["type"] == "directory":
return True
return False
def is_file(self):
info = self._accessor.info(self)
if info["type"] == "file":
return True
return False
def chmod(self, mod):
raise NotImplementedError
def rename(self, target):
# can be implemented, but may be tricky
raise NotImplementedError
def touch(self, truncate=True, **kwargs):
self._accessor.touch(self, truncate=truncate, **kwargs)
def unlink(self, missing_ok=False):
if not self.exists():
if not missing_ok:
raise FileNotFoundError
else:
return
self._accessor.rm(self, recursive=False)
def rmdir(self, recursive=True):
"""Add warning if directory not empty
assert is_dir?
"""
try:
assert self.is_dir()
except AssertionError:
raise NotDirectoryError
self._accessor.rm(self, recursive=recursive)
@classmethod
def _from_parts_init(cls, args, init=False):
return super()._from_parts(args, init=init)
def _from_parts(self, args, init=True):
# We need to call _parse_args on the instance, so as to get the
# right flavour.
obj = object.__new__(self.__class__)
drv, root, parts = self._parse_args(args)
obj._drv = drv
obj._root = root
obj._parts = parts
if init:
obj._init(**self._kwargs)
return obj
def _from_parsed_parts(self, drv, root, parts, init=True):
obj = object.__new__(self.__class__)
obj._drv = drv
obj._root = root
obj._parts = parts
if init:
obj._init(**self._kwargs)
return obj
def __truediv__(self, key):
# Add `/` root if not present
if len(self._parts) == 0:
key = f"{self._root}{key}"
# Adapted from `PurePath._make_child`
drv, root, parts = self._parse_args((key,))
drv, root, parts = self._flavour.join_parsed_parts(
self._drv, self._root, self._parts, drv, root, parts
)
kwargs = self._kwargs.copy()
kwargs.pop("_url")
# Create a new object
out = self.__class__(
self._format_parsed_parts(drv, root, parts),
**kwargs,
)
return out
def __setstate__(self, state):
kwargs = state["_kwargs"].copy()
kwargs["_url"] = self._url
self._kwargs = kwargs
# _init needs to be called again, because when __new__ called _init,
# the _kwargs were not yet set
self._init()
def __reduce__(self):
kwargs = self._kwargs.copy()
kwargs.pop("_url", None)
return (
self.__class__,
(self._format_parsed_parts(self._drv, self._root, self._parts),),
{"_kwargs": kwargs},
)
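# Editor's sketch of intended usage (assumes an fsspec implementation such as
# s3fs is installed; the bucket name is a placeholder):
#   from upath.core import UPath
#   p = UPath("s3://example-bucket/data") / "file.txt"
#   if p.exists():
#       with p.open("rb") as f:
#           print(f.read())
# URLs whose scheme has no registered fsspec implementation fall back to plain
# pathlib.PosixPath / pathlib.WindowsPath objects (see UPath.__new__ above).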
|
PypiClean
|
/ikaaro-0.75.0.tar.gz/ikaaro-0.75.0/resource_.py
|
# Import from the Standard Library
from pickle import dumps
from os.path import basename, dirname
# Import from itools
from itools.core import is_prototype, lazy
from itools.csv import Property
from itools.database import Resource, Metadata, register_field
from itools.database import AndQuery, NotQuery, PhraseQuery
from itools.datatypes import Boolean, DateTime, Integer, String, Unicode
from itools.gettext import MSG
from itools.handlers import Folder as FolderHandler
from itools.log import log_warning
from itools.uri import Path
from itools.web import BaseView, get_context
# Import from ikaaro
from autoadd import AutoAdd
from autoedit import AutoEdit
from autoform import CheckboxWidget
from datatypes import CopyCookie
from enumerates import Groups_Datatype
from exceptions import ConsistencyError
from fields import Char_Field, Datetime_Field, File_Field, HTMLFile_Field
from fields import Select_Field, Text_Field, Textarea_Field
from popup import DBResource_AddImage, DBResource_AddLink
from popup import DBResource_AddMedia
from resource_views import DBResource_Remove
from resource_views import DBResource_Links, DBResource_Backlinks
from resource_views import LoginView, LogoutView
from resource_views import Put_View, Delete_View
from resource_views import DBResource_GetFile, DBResource_GetImage
from rest import Rest_Login, Rest_Schema, Rest_Query
from rest import Rest_Create, Rest_Read, Rest_Update, Rest_Delete
from revisions_views import DBResource_CommitLog, DBResource_Changes
from utils import get_base_path_query
class Share_Field(Select_Field):
title = MSG(u'Share')
datatype = Groups_Datatype
widget = CheckboxWidget
multiple = True
indexed = True
def access(self, mode, resource):
context = get_context()
return context.root.is_allowed_to_share(context.user, resource)
class DBResource(Resource):
class_version = '20071215'
class_description = None
class_icon16 = 'icons/16x16/resource.png'
class_icon48 = 'icons/48x48/resource.png'
class_views = []
context_menus = []
def __init__(self, metadata):
self.metadata = metadata
def __eq__(self, resource):
if not isinstance(resource, DBResource):
error = "cannot compare DBResource and %s" % type(resource)
raise TypeError, error
return self.abspath == resource.abspath
def __ne__(self, node):
return not self.__eq__(node)
#######################################################################
# API / Tree
#######################################################################
@property
def database(self):
return self.metadata.database
@lazy
def parent(self):
abspath = self.abspath
if len(abspath) == 0:
return None
return self.get_resource(abspath[:-1])
@property
def name(self):
return self.abspath.get_name()
def get_root(self):
return self.get_resource('/')
def get_pathto(self, resource):
return self.abspath.get_pathto(resource.abspath)
#######################################################################
# API / Folderish
#######################################################################
__fixed_handlers__ = [] # Resources that cannot be removed
@property
def handler(self):
cls = FolderHandler
key = self.metadata.key[:-9]
handler = self.database.get_handler(key, cls=cls, soft=True)
if handler is None:
handler = cls()
self.database.push_phantom(key, handler)
return handler
def get_handlers(self):
"""Return all the handlers attached to this resource, except the
metadata.
"""
handlers = [self.handler]
# Fields
for name, field in self.get_fields():
if issubclass(field, File_Field):
value = field.get_value(self, name)
if value is not None:
handlers.append(value)
# Ok
return handlers
def _get_names(self):
folder = self.handler
return [ x[:-9] for x in folder.get_handler_names()
if x[-9:] == '.metadata' ]
def get_names(self, path='.'):
resource = self.get_resource(path)
return resource._get_names()
def get_resource(self, path, soft=False):
if type(path) is not Path:
path = Path(path)
# 1. Get the metadata
if path.is_absolute():
abspath = path
else:
abspath = self.abspath.resolve2(path)
return self.database.get_resource(abspath, soft=soft)
def get_resources(self, path='.'):
here = self.get_resource(path)
for name in here._get_names():
yield here.get_resource(name)
def make_resource_name(self):
max_id = -1
for name in self.get_names():
# Mixing explicit and automatically generated names is allowed
try:
id = int(name)
except ValueError:
continue
if id > max_id:
max_id = id
return str(max_id + 1)
def make_resource(self, name, cls, soft=False, **kw):
# Automatic name
if name is None:
name = self.make_resource_name()
# Make a resource somewhere else
if '/' in name:
path = dirname(name)
name = basename(name)
resource = self.get_resource(path)
resource.make_resource(name, cls, soft=soft, **kw)
return
# Soft
if soft is True:
resource = self.get_resource(name, soft=True)
if resource:
return resource
# Make the metadata
metadata = Metadata(cls=cls)
self.handler.set_handler('%s.metadata' % name, metadata)
metadata.set_property('mtime', get_context().timestamp)
# Initialize
resource = self.get_resource(name)
resource.init_resource(**kw)
# Ok
self.database.add_resource(resource)
return resource
def del_resource(self, name, soft=False, ref_action='restrict'):
"""ref_action allows to specify which action is done before deleting
the resource.
ref_action can take 2 values:
- 'restrict' (default value): do an integrity check
- 'force': do nothing
"""
database = self.database
resource = self.get_resource(name, soft=soft)
if soft and resource is None:
return
# Referential action
if ref_action == 'restrict':
# Check referential integrity (FIXME Check sub-resources too)
path = str(resource.abspath)
query_base_path = get_base_path_query(path)
query = AndQuery(PhraseQuery('links', path),
NotQuery(query_base_path))
results = database.search(query)
# A resource may have been updated in the same transaction,
# so not yet reindexed: we need to check that the resource
# really links.
for referrer in results.get_resources():
if path in referrer.get_links():
err = 'cannot delete, resource "%s" is referenced'
raise ConsistencyError, err % path
elif ref_action == 'force':
# Do not check referential integrity
pass
else:
raise ValueError, 'Incorrect ref_action "%s"' % ref_action
# Events, remove
path = str(resource.abspath)
database.remove_resource(resource)
# Remove
fs = database.fs
for handler in resource.get_handlers():
# Skip empty folders and phantoms
if fs.exists(handler.key):
database.del_handler(handler.key)
self.handler.del_handler('%s.metadata' % name)
# Clear cookie
context = get_context()
cut, paths = context.get_cookie('ikaaro_cp', datatype=CopyCookie)
if path in paths:
context.del_cookie('ikaaro_cp')
def copy_resource(self, source, target):
raise NotImplementedError
def move_resource(self, source, target):
raise NotImplementedError
def traverse_resources(self):
yield self
for name in self._get_names():
resource = self.get_resource(name)
for x in resource.traverse_resources():
yield x
#######################################################################
# API / Views
#######################################################################
def get_default_view_name(self):
views = self.class_views
if not views:
return None
context = get_context()
for view_name in views:
view = getattr(self, view_name, None)
if context.is_access_allowed(self, view):
return view_name
return views[0]
def get_view(self, name, query=None):
# To define a default view, override this
if name is None:
name = self.get_default_view_name()
if name is None:
return None
# Explicit view, defined by name
view = getattr(self, name, None)
if is_prototype(view, BaseView):
context = get_context()
view = view(resource=self, context=context) # bind
return view
return None
def get_context_menus(self):
return self.context_menus
########################################################################
# Properties
########################################################################
def get_value(self, name, language=None):
field = self.get_field(name)
if field is None:
return None
return field.get_value(self, name, language)
def set_value(self, name, value, language=None, **kw):
field = self.get_field(name)
if field is None:
raise ValueError, 'Field %s does not exist' % name
return field.set_value(self, name, value, language, **kw)
def get_value_title(self, name, language=None):
field = self.get_field(name)
if field is None:
return None
return field.get_value_title(self, name, language)
def get_brain_value(self, name):
brain = get_context().database.search(
PhraseQuery('abspath', str(self.abspath))).get_documents()[0]
return getattr(brain, name, None)
def get_html_field_body_stream(self, name, language=None):
"""Utility method, returns the stream for the given html field.
"""
# 1. Check it is an html-file field
field = self.get_field(name)
if not is_prototype(field, HTMLFile_Field):
raise ValueError, 'expected html-file field'
# 2. Get the handler
handler = field.get_value(self, name, language)
if not handler:
handler = field.class_handler()
# 3. Get the body
body = handler.get_body()
if not body:
raise ValueError, 'html file does not have a body'
return body.get_content_elements()
def get_property(self, name, language=None):
property = self.metadata.get_property(name, language=language)
if property:
return property
field = self.get_field(name)
if field is None:
return None
default = field.get_default()
if field.multiple:
return [ Property(x) for x in default ]
return Property(default)
# XXX Backwards compatibility
set_property = set_value
def get_page_title(self):
return self.get_title()
def init_resource(self, **kw):
"""Return a Metadata object with sensible default values.
"""
# Ownership
owner = self.get_field('owner')
if owner:
user = get_context().user
if user:
self.set_value('owner', str(user.abspath))
# Keyword parameters
for name, value in kw.items():
field = self.get_field(name)
if field is None:
raise ValueError, 'undefined field "%s"' % name
if type(value) is dict:
for lang in value:
field._set_value(self, name, value[lang], lang)
else:
field._set_value(self, name, value)
def load_handlers(self):
self.get_handlers()
########################################################################
# Fields
########################################################################
mtime = Datetime_Field(indexed=True, stored=True, readonly=True)
last_author = Char_Field(indexed=False, stored=True, readonly=True)
title = Text_Field(indexed=True, stored=True, title=MSG(u'Title'))
description = Textarea_Field(indexed=True, title=MSG(u'Description'),
hidden_by_default=True)
subject = Text_Field(indexed=True, title=MSG(u'Keywords'),
hidden_by_default=True)
share = Share_Field
@property
def is_content(self):
return self.parent.is_content
def has_property(self, name, language=None):
return self.metadata.has_property(name, language=language)
def del_property(self, name):
if self.has_property(name):
self.database.change_resource(self)
self.metadata.del_property(name)
########################################################################
# Versioning
########################################################################
def get_files_to_archive(self, content=False):
metadata = self.metadata.key
if content is True:
folder = self.handler.key
return [metadata, folder]
return [metadata]
def get_revisions(self, n=None, content=False, author_pattern=None,
grep_pattern=None):
if self.parent is None and content is True:
files = None
else:
files = self.get_files_to_archive(content)
worktree = self.database.worktree
return worktree.git_log(files, n, author_pattern, grep_pattern)
def get_owner(self):
return self.get_value('owner')
def get_share(self):
return self.get_value('share')
########################################################################
# Indexing
########################################################################
def to_text(self):
"""This function must return:
1) A unicode text.
or
2) A dict in a multilingual context:
{'fr': u'....',
'en': u'....' ....}
"""
raise NotImplementedError
def get_catalog_values(self):
values = {}
# Step 1. Automatically index fields
languages = self.get_root().get_value('website_languages')
for name, field in self.get_fields():
if not field.indexed and not field.stored:
continue
if field.multilingual:
value = {}
for language in languages:
value[language] = field.get_value(self, name, language)
values[name] = value
else:
values[name] = field.get_value(self, name)
# Step 2. Index non-metadata properties
# Path related fields
abspath = self.abspath
values['abspath'] = str(abspath)
n = len(abspath)
values['abspath_depth'] = n
if n:
values['parent_paths'] = [ str(abspath[:i]) for i in range(n) ]
values['name'] = self.name
values['is_content'] = self.is_content
# Class related fields
values['format'] = self.metadata.format
values['base_classes'] = []
for cls in self.__class__.__mro__:
class_id = getattr(cls, 'class_id', None)
if class_id:
values['base_classes'].append(class_id)
# Links to other resources
values['owner'] = self.get_owner()
values['share'] = self.get_share()
values['links'] = list(self.get_links())
# Full text
context = get_context()
try:
server = context.server
except AttributeError:
server = None
if server is not None and server.index_text:
try:
values['text'] = self.to_text()
except NotImplementedError:
pass
except Exception:
log = 'Indexation failed: %s' % abspath
log_warning(log, domain='ikaaro')
# Time events
reminder, payload = self.next_time_event()
values['next_time_event'] = reminder
values['next_time_event_payload'] = dumps(payload)
# Ok
return values
#######################################################################
# Time events
#######################################################################
def next_time_event(self):
return None, None
def time_event(self, payload):
raise NotImplementedError
#######################################################################
# API
#######################################################################
def rename_handlers(self, new_name):
"""Consider we want to rename this resource to the given 'new_name',
return the old a new names for all the attached handlers (except the
metadata).
This method is required by the "move_resource" method.
"""
langs = self.get_resource('/').get_value('website_languages')
aux = [(self.name, new_name)]
for field_name in self.fields:
field = self.get_field(field_name)
if field and issubclass(field, File_Field):
old = '%s.%s' % (self.name, field_name)
new = '%s.%s' % (new_name, field_name)
if field.multilingual:
for language in langs:
aux.append(('%s.%s' % (old, language),
'%s.%s' % (new, language)))
else:
aux.append((old, new))
return aux
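# Editor's illustration: renaming a resource 'report' to 'report2' that has a
# hypothetical multilingual File_Field named 'data', with website languages
# ('en', 'fr'), would return:
#   [('report', 'report2'),
#    ('report.data.en', 'report2.data.en'),
#    ('report.data.fr', 'report2.data.fr')]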
def _on_move_resource(self, source):
"""This method updates the links from/to other resources. It is
called when the resource has been moved and/or renamed.
This method is called by 'Database._before_commit', the 'source'
parameter is the place the resource has been moved from.
"""
# (1) Update links to other resources
self.update_incoming_links(Path(source))
# (2) Update resources that link to me
database = self.database
target = self.abspath
query = PhraseQuery('links', source)
results = database.search(query).get_documents()
for result in results:
path = result.abspath
path = database.resources_old2new.get(path, path)
resource = self.get_resource(path)
resource.update_links(source, target)
def get_links(self):
# Automatically from the fields
languages = self.get_resource('/').get_value('website_languages')
links = set()
for field_name in self.fields:
field = self.get_field(field_name)
if field:
field.get_links(links, self, field_name, languages)
# Support for dynamic models
class_id = self.metadata.format
if class_id[0] == '/':
links.add(class_id)
# Ok
return links
def update_links(self, source, target):
"""The resource identified by 'source' is going to be moved to
'target'. Update our links to it.
The parameters 'source' and 'target' are absolute 'Path' objects.
"""
base = str(self.abspath)
old_base = self.database.resources_new2old.get(base, base)
old_base = Path(old_base)
new_base = Path(base)
languages = self.get_resource('/').get_value('website_languages')
for field_name in self.fields:
field = self.get_field(field_name)
if field:
field.update_links(self, field_name, source, target,
languages, old_base, new_base)
self.database.change_resource(self)
def update_incoming_links(self, source):
"""Update the relative links coming out from this resource after it
was moved, so they are not broken. The old path is in parameter. The
new path is "self.abspath".
"""
languages = self.get_resource('/').get_value('website_languages')
for field_name in self.fields:
field = self.get_field(field_name)
if field:
field.update_incoming_links(self, field_name, source,
languages)
########################################################################
# Upgrade
########################################################################
def get_next_versions(self):
cls_version = self.class_version
obj_version = self.metadata.version
# Set zero version if the resource does not have a version
if obj_version is None:
obj_version = '00000000'
# Get all the version numbers
versions = []
for cls in self.__class__.mro():
for name in cls.__dict__.keys():
if not name.startswith('update_'):
continue
kk, version = name.split('_', 1)
if len(version) != 8:
continue
try:
int(version)
except ValueError:
continue
if version > obj_version and version <= cls_version:
versions.append(version)
versions.sort()
return versions
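    # Migration hooks follow the 'update_YYYYMMDD' naming convention scanned for above.
    # A hypothetical subclass would declare, for example:
    #
    #     class_version = '20240101'
    #
    #     def update_20240101(self):
    #         ...  # migrate whatever changed in this version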
def update(self, version):
# Action
getattr(self, 'update_%s' % version)()
# If the action removes the resource, we are done
metadata = self.metadata
if metadata.key is None:
return
# Update version
metadata.set_changed()
metadata.version = version
#######################################################################
# Icons
#######################################################################
@classmethod
def get_class_icon(cls, size=16):
icon = getattr(cls, 'class_icon%s' % size, None)
if icon is None:
return None
return '/ui/%s' % icon
@classmethod
def get_resource_icon(cls, size=16):
icon = getattr(cls, 'icon%s' % size, None)
if icon is None:
return cls.get_class_icon(size)
return ';icon%s' % size
def get_method_icon(self, view, size='16x16', **kw):
icon = getattr(view, 'icon', None)
if icon is None:
return None
if callable(icon):
icon = icon(self, **kw)
return '/ui/icons/%s/%s' % (size, icon)
#######################################################################
# User interface
#######################################################################
def get_views(self):
context = get_context()
for name in self.class_views:
view_name = name.split('?')[0]
view = self.get_view(view_name)
if context.is_access_allowed(self, view):
yield name, view
def get_title(self, language=None):
title = self.get_value('title', language=language)
if title:
return title
# Fallback to the resource's name
return unicode(self.name)
def get_edit_languages(self, context):
root = self.get_root()
site_languages = root.get_value('website_languages')
default = root.get_default_edit_languages()
# Can not use context.query[] because edit_language is not necessarily
# defined
datatype = String(multiple=True, default=default)
edit_languages = context.get_query_value('edit_language', datatype)
edit_languages = [ x for x in edit_languages if x in site_languages ]
return edit_languages if edit_languages else default
#######################################################################
# Cut & Paste Resources
#######################################################################
def can_paste(self, source):
"""Is the source resource can be pasted into myself.
Question is "can I handle this type of resource?"
"""
raise NotImplementedError
def can_paste_into(self, target):
"""Can I be pasted into the given target.
Question is "Is this container compatible with myself?"
"""
# No restriction by default. Functional modules will want to keep
# their specific resources for them.
return True
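    # For illustration (hypothetical subclass): a container that only accepts a given
    # resource type could implement can_paste() like
    #
    #     def can_paste(self, source):
    #         return isinstance(source, Image)  # 'Image' is a hypothetical class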
# Views
new_instance = AutoAdd(fields=['title', 'location'])
edit = AutoEdit(fields=['title', 'description', 'subject', 'share'])
remove = DBResource_Remove
get_file = DBResource_GetFile
get_image = DBResource_GetImage
# Login/Logout
login = LoginView
logout = LogoutView
# Popups
add_image = DBResource_AddImage
add_link = DBResource_AddLink
add_media = DBResource_AddMedia
# Commit log
commit_log = DBResource_CommitLog
changes = DBResource_Changes
# Links
backlinks = DBResource_Backlinks
links = DBResource_Links
# External editor
http_put = Put_View
http_delete = Delete_View
# Rest (web services)
rest_login = Rest_Login
rest_query = Rest_Query
rest_create = Rest_Create
rest_read = Rest_Read
rest_update = Rest_Update
rest_delete = Rest_Delete
rest_schema = Rest_Schema
###########################################################################
# Register read-only fields
###########################################################################
# Path related fields
register_field('abspath', String(indexed=True, stored=True))
register_field('abspath_depth', Integer(indexed=True, stored=True))
register_field('parent_paths', String(multiple=True, indexed=True))
register_field('name', String(stored=True, indexed=True))
# Class related fields
register_field('format', String(indexed=True, stored=True))
register_field('base_classes', String(multiple=True, indexed=True))
# Referential integrity
register_field('links', String(multiple=True, indexed=True))
# Full text search
register_field('text', Unicode(indexed=True))
# Various classifications
register_field('is_content', Boolean(indexed=True))
# Time events
register_field('next_time_event', DateTime(stored=True))
register_field('next_time_event_payload', String(stored=True))
|
PypiClean
|
/ricsdl-3.1.3-py3-none-any.whl/examples/sync.py
|
#
# This source code is part of the near-RT RIC (RAN Intelligent Controller)
# platform project (RICP).
#
"""
Examples of how to use the synchronous API functions of the Shared Data Layer (SDL).

Execution of these examples requires:
    * The following Redis extension commands have been installed in the runtime environment:
        - MSETPUB
        - SETIE
        - SETIEMPUB
        - SETNXMPUB
        - DELMPUB
        - DELIE
        - DELIEMPUB
      Redis v4.0 or greater is required. Older versions do not support extension modules.
      The implementation of the above commands is provided by RIC DBaaS:
      https://gerrit.o-ran-sc.org/r/admin/repos/ric-plt/dbaas
      In official RIC deployments these commands are installed by the `dbaas` service into the
      Redis container(s).
      In a development environment you may want to install the commands manually in the
      pod/container that is running Redis.
    * The following environment variables need to be set in the pod/container where the
      application utilizing SDL is going to run.
        DBAAS_SERVICE_HOST = [DB service address]
        DBAAS_SERVICE_PORT = [Comma separated list of DB service ports]. Only one port is
        supported in RIC deployments; Nokia SEP deployments can have multiple ports.
        DBAAS_MASTER_NAME = [Comma separated list of DB names]. Needs to be set only if Redis
        sentinel is used to provide high availability for the Redis DB solution. Only one DB
        name is supported in RIC deployments; Nokia SEP deployments can have multiple DB names.
        DBAAS_SERVICE_SENTINEL_PORT = [Comma separated list of Redis sentinel port numbers].
        Needs to be set only if Redis sentinel is in use. Only one port is supported in RIC
        deployments; Nokia SEP deployments can have multiple ports.
        DBAAS_CLUSTER_ADDR_LIST = [Comma separated list of DB service addresses]. Set only if
        more than one Redis sentinel group is in use. Only used in Nokia SEP deployments.
      In official RIC deployments the first four environment variables are defined in the Helm
      configMaps of the DBaaS, and these configurations can be loaded automatically as
      environment variables into application pods via an `envFrom dbaas-appconfig` statement in
      an application's Helm charts.
      The last environment variable is currently not in use in official RIC deployments, only
      in Nokia SEP deployments.
"""
from ricsdl.syncstorage import SyncStorage
from ricsdl.exceptions import RejectedByBackend, NotConnected, BackendError
# Constants used in the examples below.
MY_NS = 'my_ns'
MY_GRP_NS = 'my_group_ns'
MY_LOCK_NS = 'my_group_ns'
def _try_func_return(func):
"""
Generic wrapper function to call SDL API function and handle exceptions if they are raised.
"""
try:
return func()
except RejectedByBackend as exp:
print(f'SDL function {func.__name__} failed: {str(exp)}')
# Permanent failure, just forward the exception
raise
except (NotConnected, BackendError) as exp:
print(f'SDL function {func.__name__} failed for a temporal error: {str(exp)}')
# Here we could have a retry logic
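        # A minimal retry sketch (hypothetical, not part of the original example; it
        # would also need an "import time" at the top of the file):
        #
        #     for _ in range(3):
        #         try:
        #             return func()
        #         except (NotConnected, BackendError):
        #             time.sleep(1)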
# Creates an SDL instance. The call creates a connection to the SDL database backend.
mysdl = _try_func_return(SyncStorage)
# Creates an SDL instance that utilizes a fake database backend. The fake database is meant to
# be used only during the development phase of SDL clients. It does not provide more advanced
# database services.
# mysdl = _try_func_return(lambda: SyncStorage(fake_db_backend='dict'))
# Checks if SDL is operational. Note that it is not necessary to call `is_active()` after each
# SDL instance creation. The example below just shows how to call it on demand whenever SDL
# health needs to be checked.
is_active = mysdl.is_active()
assert is_active is True
# Sets a value 'my_value' for a key 'my_key' under given namespace. Note that the value
# type must be bytes and multiple key-value pairs can be set in one set function call.
_try_func_return(lambda: mysdl.set(MY_NS, {'my_key': b'my_value'}))
# Gets the values of the keys 'my_key' and 'something not existing' under given namespace.
# Only the keys that exist are included in the returned dict, and the returned values are bytes.
my_ret_dict = _try_func_return(lambda: mysdl.get(MY_NS, {'my_key', 'something not existing'}))
for key, val in my_ret_dict.items():
assert val.decode("utf-8") == u'my_value'
# Sets a value 'my_value2' for a key 'my_key' under given namespace only if the old value is
# 'my_value'.
# Note that value types must be bytes.
was_set = _try_func_return(lambda: mysdl.set_if(MY_NS, 'my_key', b'my_value', b'my_value2'))
assert was_set is True
# Try again. This time the value 'my_value2' won't be set, because the key already has the
# value 'my_value2'.
was_set = _try_func_return(lambda: mysdl.set_if(MY_NS, 'my_key', b'my_value', b'my_value2'))
assert was_set is False
# Sets a value 'my_value' for a key 'my_key2' under given namespace only if the key doesn't exist.
# Note that value types must be bytes.
was_set = _try_func_return(lambda: mysdl.set_if_not_exists(MY_NS, 'my_key2', b'my_value'))
assert was_set is True
# Try again. This time the key 'my_key2' already exists.
was_set = _try_func_return(lambda: mysdl.set_if_not_exists(MY_NS, 'my_key2', b'my_value'))
assert was_set is False
# Removes a key 'my_key' under given namespace.
_try_func_return(lambda: mysdl.remove(MY_NS, 'my_key'))
my_ret_dict = _try_func_return(lambda: mysdl.get(MY_NS, 'my_key'))
assert my_ret_dict == {}
# Removes the key 'my_key2' under given namespace only if its current value is 'my_value'.
was_removed = _try_func_return(lambda: mysdl.remove_if(MY_NS, 'my_key2', b'my_value'))
assert was_removed is True
# Try again to remove the key 'my_key2', which no longer exists.
was_removed = _try_func_return(lambda: mysdl.remove_if(MY_NS, 'my_key2', b'my_value'))
assert was_removed is False
# Removes all the keys under given namespace.
_try_func_return(lambda: mysdl.set(MY_NS, {'my_key': b'something'}))
my_ret_dict = _try_func_return(lambda: mysdl.get(MY_NS, {'my_key'}))
assert my_ret_dict != {}
_try_func_return(lambda: mysdl.remove_all(MY_NS))
my_ret_dict = _try_func_return(lambda: mysdl.get(MY_NS, {'my_key'}))
assert my_ret_dict == {}
# Finds keys under given namespace that match the given key search pattern 'my_k*'.
_try_func_return(lambda: mysdl.set(MY_NS, {'my_key': b'my_value'}))
ret_keys = _try_func_return(lambda: mysdl.find_keys(MY_NS, 'my_k*'))
assert ret_keys == ['my_key']
# Finds keys and their values under given namespace that match the given key search
# pattern 'my_k*'.
# Note that the type of returned value is bytes.
ret_key_values = _try_func_return(lambda: mysdl.find_and_get(MY_NS, 'my_k*'))
assert ret_key_values == {'my_key': b'my_value'}
_try_func_return(lambda: mysdl.remove_all(MY_NS))
# Adds a member 'a' to a group 'my_group' under given namespace. A group is a unique collection of
# members.
# Note that member type must be bytes and multiple members can be set in one set function call.
_try_func_return(lambda: mysdl.add_member(MY_GRP_NS, 'my_group', {b'a'}))
# Try again to add the member 'a'. This time 'a' won't be added, because 'a' already belongs
# to the group.
_try_func_return(lambda: mysdl.add_member(MY_GRP_NS, 'my_group', {b'a'}))
# Gets group 'my_group' members under given namespace.
# Note that the type of returned member is bytes.
ret_members = _try_func_return(lambda: mysdl.get_members(MY_GRP_NS, 'my_group'))
assert ret_members == {b'a'}
# Checks if 'a' is a member of the group 'my_group' under given namespace.
was_member = _try_func_return(lambda: mysdl.is_member(MY_GRP_NS, 'my_group', b'a'))
assert was_member is True
was_member = _try_func_return(lambda: mysdl.is_member(MY_GRP_NS, 'my_group', b'not a member'))
assert was_member is False
# Returns the count of members of a group 'my_group' under given namespace.
ret_count = _try_func_return(lambda: mysdl.group_size(MY_GRP_NS, 'my_group'))
assert ret_count == 1
# Removes the members 'a' and 'not exists' from the group 'my_group' under given
# namespace. A member that is not in the group is simply ignored.
_try_func_return(lambda: mysdl.remove_member(MY_GRP_NS, 'my_group', {b'a', b'not exists'}))
ret_count = _try_func_return(lambda: mysdl.group_size(MY_GRP_NS, 'my_group'))
assert ret_count == 0
# Removes the group 'my_group' under given namespace.
_try_func_return(lambda: mysdl.add_member(MY_GRP_NS, 'my_group', {b'a', b'b', b'c'}))
ret_count = _try_func_return(lambda: mysdl.group_size(MY_GRP_NS, 'my_group'))
assert ret_count == 3
_try_func_return(lambda: mysdl.remove_group(MY_GRP_NS, 'my_group'))
ret_count = _try_func_return(lambda: mysdl.group_size(MY_GRP_NS, 'my_group'))
ret_members = _try_func_return(lambda: mysdl.get_members(MY_GRP_NS, 'my_group'))
assert ret_count == 0
assert ret_members == set()
# Gets a lock resource 'my_lock' under given namespace.
# Note that this function does not take the lock; you need to call the 'acquire' function to
# take the lock yourself.
my_lock = _try_func_return(lambda: mysdl.get_lock_resource(MY_LOCK_NS, "my_lock", expiration=5.5))
assert my_lock is not None
# Acquires the lock from the lock resource. Returns True if the lock was taken within the given retry limits.
was_acquired = _try_func_return(lambda: my_lock.acquire(retry_interval=0.5, retry_timeout=2))
assert was_acquired is True
# Try again. This time the lock won't be acquired, because we already hold it.
was_acquired = _try_func_return(lambda: my_lock.acquire(retry_interval=0.1, retry_timeout=0.2))
assert was_acquired is False
# Refreshes the remaining validity time of the existing lock back to the initial value.
_try_func_return(my_lock.refresh)
# Gets the remaining validity time of the lock.
ret_time = _try_func_return(my_lock.get_validity_time)
assert ret_time != 0
# Releases the lock.
_try_func_return(my_lock.release)
# A locking example that utilizes the Python 'with' statement with an SDL lock.
# The lock is released automatically when execution leaves the scope of the
# 'with my_lock' statement.
my_lock = _try_func_return(lambda: mysdl.get_lock_resource(MY_LOCK_NS, "my_lock", 2.5))
with my_lock:
    # Just an example of how to use the lock API
time_left = _try_func_return(my_lock.get_validity_time)
    # Add here the operations that need to be done under the lock, for example
    # operations on shared resources that must be done in a mutually exclusive
    # way.
# The lock is no longer held here
# Closes the SDL connection.
mysdl.close()
|
PypiClean
|
/hops-petastorm-0.9.4.tar.gz/hops-petastorm-0.9.4/examples/mnist/pytorch_example.py
|
###
# Adapted to petastorm dataset using original contents from
# https://github.com/pytorch/examples/mnist/main.py .
###
from __future__ import division, print_function
import argparse
# Must import pyarrow before torch. See: https://github.com/uber/petastorm/blob/master/docs/troubleshoot.rst
import pyarrow # noqa: F401 pylint: disable=W0611
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms
from examples.mnist import DEFAULT_MNIST_DATA_PATH
from petastorm import make_reader, TransformSpec
from petastorm.pytorch import DataLoader
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
# pylint: disable=arguments-differ
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def train(model, device, train_loader, log_interval, optimizer, epoch):
model.train()
for batch_idx, row in enumerate(train_loader):
data, target = row['image'].to(device), row['digit'].to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), loss.item()))
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
count = 0
with torch.no_grad():
for row in test_loader:
data, target = row['image'].to(device), row['digit'].to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
count += data.shape[0]
test_loss /= count
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, count, 100. * correct / count))
def _transform_row(mnist_row):
    # For this example, the images are stored as a simpler ndarray of shape (28, 28), but the
    # training network expects 3-dim images, hence the additional lambda transform.
transform = transforms.Compose([
transforms.Lambda(lambda nd: nd.reshape(28, 28, 1)),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
# In addition, the petastorm pytorch DataLoader does not distinguish the notion of
# data or target transform, but that actually gives the user more flexibility
# to make the desired partial transform, as shown here.
result_row = {
'image': transform(mnist_row['image']),
'digit': mnist_row['digit']
}
return result_row
def main():
# Training settings
parser = argparse.ArgumentParser(description='Petastorm MNIST Example')
default_dataset_url = 'file://{}'.format(DEFAULT_MNIST_DATA_PATH)
parser.add_argument('--dataset-url', type=str,
default=default_dataset_url, metavar='S',
help='hdfs:// or file:/// URL to the MNIST petastorm dataset '
'(default: %s)' % default_dataset_url)
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--all-epochs', action='store_true', default=False,
help='train all epochs before testing accuracy/loss')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device('cuda' if use_cuda else 'cpu')
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
# Configure loop and Reader epoch for illustrative purposes.
# Typical training usage would use the `all_epochs` approach.
#
if args.all_epochs:
# Run training across all the epochs before testing for accuracy
loop_epochs = 1
reader_epochs = args.epochs
else:
# Test training accuracy after each epoch
loop_epochs = args.epochs
reader_epochs = 1
transform = TransformSpec(_transform_row, removed_fields=['idx'])
# Instantiate each petastorm Reader with a single thread, shuffle enabled, and appropriate epoch setting
for epoch in range(1, loop_epochs + 1):
with DataLoader(make_reader('{}/train'.format(args.dataset_url), num_epochs=reader_epochs,
transform_spec=transform),
batch_size=args.batch_size) as train_loader:
train(model, device, train_loader, args.log_interval, optimizer, epoch)
with DataLoader(make_reader('{}/test'.format(args.dataset_url), num_epochs=reader_epochs,
transform_spec=transform),
batch_size=args.test_batch_size) as test_loader:
test(model, device, test_loader)
if __name__ == '__main__':
main()
|
PypiClean
|
/traktexport-0.1.4.tar.gz/traktexport-0.1.4/README.md
|
# traktexport
[](https://pypi.python.org/pypi/traktexport) [](https://pypi.python.org/pypi/traktexport) [](http://makeapullrequest.com)
Export your Movie/TV shows ratings and history from https://trakt.tv/
This isn't meant to be used to re-import info back into Trakt or export to another site; it's just meant to save all my data so I have it locally and can do analysis or graph my history.
## Installation
Requires `python3.7+`
To install with pip, run:
`pip install traktexport`
## Usage
```
Usage: traktexport [OPTIONS] COMMAND [ARGS]...
Export data from your Trakt account
Options:
--help Show this message and exit.
Commands:
auth setup authentication
export run an account export
inspect read/interact with an export file
merge merge multiple exports
partial_export run a partial export
```
### Auth
This uses OAuth to authenticate with the Trakt API (which afaik requires you to be a [VIP](https://trakt.tv/vip) on Trakt), see [here](https://pytrakt.readthedocs.io/en/latest/getstarted.html#oauth-auth) for more info.
This requires a manual setup the first time you use it, after which credentials are stored and this can run without any interaction.
### Setup
- Go to https://trakt.tv/oauth/applications and create a new application
- Use `urn:ietf:wg:oauth:2.0:oob` for the Redirect URI
- Run `traktexport auth yourTraktUsername`
- Follow the instructions, pasting in your Client ID/Secret from the Trakt dashboard, going to the link and pasting the generated pin back into the terminal
Once you've done that, this saves the OAuth refresh info in `${XDG_DATA_HOME:-$HOME/.local/share}/traktexport.json` (the location can be overridden with the `TRAKTEXPORT_CFG` environment variable)
### Export
Then, to export all your ratings/movies/shows, run:
`traktexport export yourTraktUsername > data.json`
The results are printed to STDOUT, so `> data.json` saves them to `data.json`
```
$ python3 -m traktexport export yourTraktUsername > data.json
[D 210326 18:42:43 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername/followers'...
[D 210326 18:42:45 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername/following'...
[D 210326 18:42:48 export:32] Requesting 'https://api-v2launch.trakt.tv/users/settings'...
[D 210326 18:42:51 export:32] Requesting 'https://api-v2launch.trakt.tv/users/likes'...
[D 210326 18:42:54 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername'...
[D 210326 18:42:56 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername/comments'...
[D 210326 18:42:59 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername/lists'...
[D 210326 18:43:01 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername/ratings'...
[D 210326 18:43:05 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername/recommendations'...
[D 210326 18:43:07 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername/watchlist'...
[D 210326 18:43:10 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername/watched/movies'...
[D 210326 18:43:13 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername/watched/shows'...
[D 210326 18:43:21 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername/collection/movies'...
[D 210326 18:43:23 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername/collection/shows'...
[D 210326 18:43:26 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername/stats'...
[D 210326 18:43:29 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername/history?limit=100&page=1'...
[D 210326 18:43:31 export:44] First item: {'id': 7353545729, 'watched_at': '2021-03-22T06:33:24.000Z', 'action': 'watch', 'type': 'movie', 'movie': {'title': 'Rain Man', 'year': 1988, 'ids': {'trakt': 304, 'slug': 'rain-man-1988', 'imdb': 'tt0095953', 'tmdb': 380}}}
[D 210326 18:43:31 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername/history?limit=100&page=2'...
[D 210326 18:43:34 export:44] First item: {'id': 7178301624, 'watched_at': '2021-01-23T04:25:15.000Z', 'action': 'watch', 'type': 'episode', 'episode': {'season': 7, 'number': 7, 'title': 'Dangerous Debt', 'ids': {'trakt': 2590748, 'tvdb': 7640571, 'imdb': 'tt9313956', 'tmdb': 2201892, 'tvrage': None}}, 'show': {'title': 'Star Wars: The Clone Wars', 'year': 2008, 'ids': {'trakt': 4170, 'slug': 'star-wars-the-clone-wars', 'tvdb': 83268, 'imdb': 'tt0458290', 'tmdb': 4194, 'tvrage': 19187}}}
[D 210326 18:43:34 export:32] Requesting 'https://api-v2launch.trakt.tv/users/yourTraktUsername/history?limit=100&page=3'...
```
#### Partial Export
You can also export a part of your recent history, instead of your entire history (as that tends to take a few minutes)
```
traktexport partial_export --help
Usage: traktexport partial_export [OPTIONS] USERNAME
Run a partial history export - assumes authentication has already
been setup
This exports your movie/TV show history from Trakt without all
the other attributes. You can specify --pages to only request the
first few pages so this doesn't take ages to run.
The 'merge' command takes multiple partial exports (or full
exports) and merges them all together into a complete history
Options:
--pages INTEGER Only request these many pages of your history
--help Show this message and exit.
```
E.g. To export your most recent 100 watches, you can run `traktexport partial_export yourTraktUsername --pages 1`
Those can then all be combined by the `merge` command, like: `traktexport merge ~/data/trakt/*.json`
To do that in Python, you can also do:
```
from traktexport.merge import read_and_merge_exports
read_and_merge_exports(["full_export.json", "partial_export.json"])
```
### Inspect
[`traktexport.dal`](./traktexport/dal.py) includes some code I use to parse the resulting JSON file into Python objects so it's easier to manipulate
```python
class TraktExport(NamedTuple):
username: str
followers: List[Follow]
following: List[Follow]
likes: List[Like]
stats: Dict[str, Any]
settings: Dict[str, Any]
watchlist: List[WatchListEntry]
ratings: List[Rating]
history: List[HistoryEntry]
```
```
python3 -m traktexport inspect data.json
Use 'data' to interact with the parsed TraktExport object
In [1]: data.history[0]
Out[1]: HistoryEntry(history_id=7353545729, watched_at=datetime.datetime(2021, 3, 22, 13, 33, 24, tzinfo=datetime.timezone.utc), action='watch', media_type='movie', media_data=Movie(title='Rain Man', year=1988, ids=SiteIds(trakt_id=304, trakt_slug='rain-man-1988', imdb_id='tt0095953', tmdb_id=380, tvdb_id=None, tvrage_id=None)))
In [2]: len(data.history)
Out[2]: 16063
In [3]: data.stats["movies"]["plays"]
Out[3]: 1511
```
Note: The export does include this info, but it isn't currently parsed:
- collection
- comments
- lists
- recommendations
... because I don't have any of those on trakt. If you use those, a PR would be appreciated!
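If you want to poke at sections `traktexport.dal` doesn't parse yet, you can always load the raw JSON yourself. A minimal sketch (this assumes the export file keeps each section under a top-level key such as `history`, matching the requests shown in the export log above):
```python
import json

with open("data.json") as f:
    raw = json.load(f)

# e.g. count raw history entries without going through traktexport.dal
print(len(raw.get("history", [])))
```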
Created to use as part of [`HPI`](https://github.com/seanbreckenridge/HPI)
|
PypiClean
|
/django-fluent-contents-3.0.tar.gz/django-fluent-contents-3.0/fluent_contents/rendering/main.py
|
from django.core.cache import cache
from django.utils.safestring import mark_safe
from fluent_contents.cache import get_placeholder_cache_key_for_parent
from fluent_contents.models import ContentItemOutput, get_parent_language_code
from . import markers
from .core import PlaceholderRenderingPipe, RenderingPipe
from .search import SearchRenderingPipe
def get_cached_placeholder_output(parent_object, placeholder_name):
"""
Return cached output for a placeholder, if available.
This avoids fetching the Placeholder object.
"""
if not PlaceholderRenderingPipe.may_cache_placeholders():
return None
language_code = get_parent_language_code(parent_object)
cache_key = get_placeholder_cache_key_for_parent(
parent_object, placeholder_name, language_code
)
return cache.get(cache_key)
def render_placeholder(
request,
placeholder,
parent_object=None,
template_name=None,
cachable=None,
limit_parent_language=True,
fallback_language=None,
):
"""
Render a :class:`~fluent_contents.models.Placeholder` object.
Returns a :class:`~fluent_contents.models.ContentItemOutput` object
which contains the HTML output and :class:`~django.forms.Media` object.
This function also caches the complete output of the placeholder
when all individual items are cacheable.
:param request: The current request object.
:type request: :class:`~django.http.HttpRequest`
:param placeholder: The placeholder object.
:type placeholder: :class:`~fluent_contents.models.Placeholder`
:param parent_object: Optional, the parent object of the placeholder (already implied by the placeholder)
:param template_name: Optional template name used to concatenate the placeholder output.
:type template_name: str | None
:param cachable: Whether the output is cachable, otherwise the full output will not be cached.
Default: False when using a template, True otherwise.
:type cachable: bool | None
:param limit_parent_language: Whether the items should be limited to the parent language.
:type limit_parent_language: bool
:param fallback_language: The fallback language to use if there are no items in the current language. Passing ``True`` uses the default :ref:`FLUENT_CONTENTS_DEFAULT_LANGUAGE_CODE`.
:type fallback_language: bool/str
:rtype: :class:`~fluent_contents.models.ContentItemOutput`
"""
output = PlaceholderRenderingPipe(request).render_placeholder(
placeholder=placeholder,
parent_object=parent_object,
template_name=template_name,
cachable=cachable,
limit_parent_language=limit_parent_language,
fallback_language=fallback_language,
)
# Wrap the result after it's stored in the cache.
if markers.is_edit_mode(request):
output.html = markers.wrap_placeholder_output(output.html, placeholder)
return output
def render_content_items(request, items, template_name=None, cachable=None):
"""
Render a list of :class:`~fluent_contents.models.ContentItem` objects as HTML string.
This is a variation of the :func:`render_placeholder` function.
Note that the items are not filtered in any way by parent or language.
The items are rendered as-is.
:param request: The current request object.
:type request: :class:`~django.http.HttpRequest`
:param items: The list or queryset of objects to render. Passing a queryset is preferred.
:type items: list or queryset of :class:`~fluent_contents.models.ContentItem`.
:param template_name: Optional template name used to concatenate the placeholder output.
:type template_name: Optional[str]
:param cachable: Whether the output is cachable, otherwise the full output will not be cached.
Default: False when using a template, True otherwise.
:type cachable: Optional[bool]
:rtype: :class:`~fluent_contents.models.ContentItemOutput`
"""
if not items:
output = ContentItemOutput(mark_safe("<!-- no items to render -->"))
else:
output = RenderingPipe(request).render_items(
placeholder=None,
items=items,
parent_object=None,
template_name=template_name,
cachable=cachable,
)
# Wrap the result after it's stored in the cache.
if markers.is_edit_mode(request):
output.html = markers.wrap_anonymous_output(output.html)
return output
def render_placeholder_search_text(placeholder, fallback_language=None):
"""
Render a :class:`~fluent_contents.models.Placeholder` object to search text.
This text can be used by an indexer (e.g. haystack) to produce content search for a parent object.
:param placeholder: The placeholder object.
:type placeholder: :class:`~fluent_contents.models.Placeholder`
:param fallback_language: The fallback language to use if there are no items in the current language.
Passing ``True`` uses the default :ref:`FLUENT_CONTENTS_DEFAULT_LANGUAGE_CODE`.
:type fallback_language: bool|str
:rtype: str
"""
# placeholder.parent is a cached lookup thanks to PlaceholderFieldDescriptor
parent_object = placeholder.parent
language = get_parent_language_code(parent_object)
output = SearchRenderingPipe(language).render_placeholder(
placeholder=placeholder,
parent_object=parent_object,
fallback_language=fallback_language,
)
return output.html # Tags already stripped.
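# A minimal usage sketch (hypothetical view code, not part of this module), using
# render_placeholder() defined above:
#
#     from django.http import HttpResponse
#
#     def page_detail(request, page):
#         # "page.content" is assumed to be a Placeholder attached to "page"
#         output = render_placeholder(request, page.content, parent_object=page)
#         return HttpResponse(output.html)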
|
PypiClean
|
/GetOptions-1.0.3.tar.gz/GetOptions-1.0.3/README.en.md
|
*************************************
# Get options
> Gets options from the console command, formats them, and returns a dict. This is similar to the way non-GNU Unix systems work.
------------------------------
## Install:
```bash
pip install GetOptions
```
------------------------------
## System requirements
Python >= 3
------------------------------
## Example
```python
import GetOptions
params_config = {
'host': {'must': False, 'data': True, 'short': 'H', 'long': 'host', 'default': 'localhost'},
'port': {'must': False, 'data': True, 'short': 'O', 'long': 'port', 'default': 3306},
'user': {'must': True, 'data': True, 'short': 'U', 'long': 'user'},
'passwd': {'must': True, 'data': True, 'short': 'P', 'long': 'passwd'},
'db': {'must': True, 'data': True, 'short': 'D', 'long': 'db'},
'init': {'must': True, 'data': False, 'short': 'I', 'long': 'init'},
}
print(GetOptions.get(params_config))
```
+ Shell command:
- `python3 test.py -H localhost -U root -P abc123 -D thinkvue -I abc 123`
+ Print result:
- `{'data': {'host': 'localhost', 'port': 3306, 'user': 'root', 'passwd': 'abc123', 'db': 'thinkvue', 'init': True}, 'args': ['abc', '123']}`
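To work with the returned dict (a small sketch based on the structure shown above):
```python
result = GetOptions.get(params_config)
opts = result['data']    # formatted option values, e.g. opts['host'], opts['port']
extra = result['args']   # remaining positional arguments, e.g. ['abc', '123']
```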
## Parameter Description
`GetOptions.get(params_config, params=None, is_show_help=True)`
- `params_config`:<Required>{type:`dict`} A dict used to describe the parameters; each primary key has 5 fields:
+ `key`:{type:`string`} The primary key in the result returned
+ `must`:{type:`bool`} Is this a required option?
+ `data`:{type:`bool`} Does it have member data?
+ `short`:{type:`string`} The short parameter, example:`-s`
+ `long`:{type:`string`} The long parameter, example:`--longParam`
+ `default`:{type:`string`} Default value
- `params`:[optional]{type:`list`} Default `sys.argv`
- `is_show_help`:[optional]{type:`bool`} Show help?
## LICENSE
[MIT](./LICENSE)
|
PypiClean
|
/openpeerpower_frontend-20210523.2-py3-none-any.whl/opp_frontend/frontend_es5/chunk.a6a05e49b5487d486578.js
|
(self.webpackChunkopenpeerpower_frontend=self.webpackChunkopenpeerpower_frontend||[]).push([[6051],{74015:function(t,e,n){"use strict";n.d(e,{j2:function(){return r},UX:function(){return i},KT:function(){return o}});var i={LIST_ITEM_ACTIVATED_CLASS:"mdc-list-item--activated",LIST_ITEM_CLASS:"mdc-list-item",LIST_ITEM_DISABLED_CLASS:"mdc-list-item--disabled",LIST_ITEM_SELECTED_CLASS:"mdc-list-item--selected",LIST_ITEM_TEXT_CLASS:"mdc-list-item__text",LIST_ITEM_PRIMARY_TEXT_CLASS:"mdc-list-item__primary-text",ROOT:"mdc-list"},r={ACTION_EVENT:"MDCList:action",ARIA_CHECKED:"aria-checked",ARIA_CHECKED_CHECKBOX_SELECTOR:'[role="checkbox"][aria-checked="true"]',ARIA_CHECKED_RADIO_SELECTOR:'[role="radio"][aria-checked="true"]',ARIA_CURRENT:"aria-current",ARIA_DISABLED:"aria-disabled",ARIA_ORIENTATION:"aria-orientation",ARIA_ORIENTATION_HORIZONTAL:"horizontal",ARIA_ROLE_CHECKBOX_SELECTOR:'[role="checkbox"]',ARIA_SELECTED:"aria-selected",CHECKBOX_RADIO_SELECTOR:'input[type="checkbox"], input[type="radio"]',CHECKBOX_SELECTOR:'input[type="checkbox"]',CHILD_ELEMENTS_TO_TOGGLE_TABINDEX:"\n ."+i.LIST_ITEM_CLASS+" button:not(:disabled),\n ."+i.LIST_ITEM_CLASS+" a\n ",FOCUSABLE_CHILD_ELEMENTS:"\n ."+i.LIST_ITEM_CLASS+" button:not(:disabled),\n ."+i.LIST_ITEM_CLASS+" a,\n ."+i.LIST_ITEM_CLASS+' input[type="radio"]:not(:disabled),\n .'+i.LIST_ITEM_CLASS+' input[type="checkbox"]:not(:disabled)\n ',RADIO_SELECTOR:'input[type="radio"]'},o={UNSET_INDEX:-1,TYPEAHEAD_BUFFER_CLEAR_TIMEOUT_MS:300}},81689:function(t,e,n){"use strict";var i=n(36220),r=n(15652),o=(n(66702),n(14114)),a=n(98734),s=n(81471);function c(t){return(c="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function l(t,e){var n;if("undefined"==typeof Symbol||null==t[Symbol.iterator]){if(Array.isArray(t)||(n=function(t,e){if(!t)return;if("string"==typeof t)return d(t,e);var n=Object.prototype.toString.call(t).slice(8,-1);"Object"===n&&t.constructor&&(n=t.constructor.name);if("Map"===n||"Set"===n)return Array.from(t);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return d(t,e)}(t))||e&&t&&"number"==typeof t.length){n&&(t=n);var i=0,r=function(){};return{s:r,n:function(){return i>=t.length?{done:!0}:{done:!1,value:t[i++]}},e:function(t){throw t},f:r}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var o,a=!0,s=!1;return{s:function(){n=t[Symbol.iterator]()},n:function(){var t=n.next();return a=t.done,t},e:function(t){s=!0,o=t},f:function(){try{a||null==n.return||n.return()}finally{if(s)throw o}}}}function d(t,e){(null==e||e>t.length)&&(e=t.length);for(var n=0,i=new Array(e);n<e;n++)i[n]=t[n];return i}function u(){var t=x(['\n <span class="mdc-list-item__primary-text">\n <slot></slot>\n </span>\n <span class="mdc-list-item__secondary-text">\n <slot name="secondary"></slot>\n </span>\n ']);return u=function(){return t},t}function f(){var t=x(["<slot></slot>"]);return f=function(){return t},t}function p(){var t=x(['\n <span class="mdc-list-item__text">\n ',"\n </span>"]);return p=function(){return t},t}function m(){var t=x(['\n <span class="mdc-list-item__meta material-icons">\n <slot name="meta"></slot>\n </span>']);return m=function(){return t},t}function h(){var t=x(['\n <span class="mdc-list-item__graphic material-icons ','">\n <slot name="graphic"></slot>\n 
</span>']);return h=function(){return t},t}function y(){var t=x(['<div class="fake-activated-ripple"></div>']);return y=function(){return t},t}function v(){var t=x(["\n <mwc-ripple\n .activated=",">\n </mwc-ripple>"]);return v=function(){return t},t}function g(){var t=x(["\n ","\n ","\n ","\n ",""]);return g=function(){return t},t}function b(){var t=x([""]);return b=function(){return t},t}function _(){var t=x([""]);return _=function(){return t},t}function x(t,e){return e||(e=t.slice(0)),Object.freeze(Object.defineProperties(t,{raw:{value:Object.freeze(e)}}))}function I(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}function S(t,e){for(var n=0;n<e.length;n++){var i=e[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(t,i.key,i)}}function w(t,e,n){return(w="undefined"!=typeof Reflect&&Reflect.get?Reflect.get:function(t,e,n){var i=function(t,e){for(;!Object.prototype.hasOwnProperty.call(t,e)&&null!==(t=k(t)););return t}(t,e);if(i){var r=Object.getOwnPropertyDescriptor(i,e);return r.get?r.get.call(n):r.value}})(t,e,n||t)}function E(t,e){return(E=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t})(t,e)}function A(t){var e=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],(function(){}))),!0}catch(t){return!1}}();return function(){var n,i=k(t);if(e){var r=k(this).constructor;n=Reflect.construct(i,arguments,r)}else n=i.apply(this,arguments);return R(this,n)}}function R(t,e){return!e||"object"!==c(e)&&"function"!=typeof e?T(t):e}function T(t){if(void 0===t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return t}function k(t){return(k=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)})(t)}var O=function(t){!function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&E(t,e)}(c,t);var e,n,i,o=A(c);function c(){var t;return I(this,c),(t=o.apply(this,arguments)).value="",t.group=null,t.tabindex=-1,t.disabled=!1,t.twoline=!1,t.activated=!1,t.graphic=null,t.multipleGraphics=!1,t.hasMeta=!1,t.noninteractive=!1,t.selected=!1,t.shouldRenderRipple=!1,t._managingList=null,t.boundOnClick=t.onClick.bind(T(t)),t._firstChanged=!0,t._skipPropRequest=!1,t.rippleHandlers=new a.A((function(){return t.shouldRenderRipple=!0,t.ripple})),t.listeners=[{target:T(t),eventNames:["click"],cb:function(){t.onClick()}},{target:T(t),eventNames:["mouseenter"],cb:t.rippleHandlers.startHover},{target:T(t),eventNames:["mouseleave"],cb:t.rippleHandlers.endHover},{target:T(t),eventNames:["focus"],cb:t.rippleHandlers.startFocus},{target:T(t),eventNames:["blur"],cb:t.rippleHandlers.endFocus},{target:T(t),eventNames:["mousedown","touchstart"],cb:function(e){var n=e.type;t.onDown("mousedown"===n?"mouseup":"touchend",e)}}],t}return e=c,(n=[{key:"render",value:function(){var t=this.renderText(),e=this.graphic?this.renderGraphic():(0,r.dy)(_()),n=this.hasMeta?this.renderMeta():(0,r.dy)(b());return(0,r.dy)(g(),this.renderRipple(),e,t,n)}},{key:"renderRipple",value:function(){return this.shouldRenderRipple?(0,r.dy)(v(),this.activated):this.activated?(0,r.dy)(y()):""}},{key:"renderGraphic",value:function(){var 
t={multi:this.multipleGraphics};return(0,r.dy)(h(),(0,s.$)(t))}},{key:"renderMeta",value:function(){return(0,r.dy)(m())}},{key:"renderText",value:function(){var t=this.twoline?this.renderTwoline():this.renderSingleLine();return(0,r.dy)(p(),t)}},{key:"renderSingleLine",value:function(){return(0,r.dy)(f())}},{key:"renderTwoline",value:function(){return(0,r.dy)(u())}},{key:"onClick",value:function(){this.fireRequestSelected(!this.selected,"interaction")}},{key:"onDown",value:function(t,e){var n=this;window.addEventListener(t,(function e(){window.removeEventListener(t,e),n.rippleHandlers.endPress()})),this.rippleHandlers.startPress(e)}},{key:"fireRequestSelected",value:function(t,e){if(!this.noninteractive){var n=new CustomEvent("request-selected",{bubbles:!0,composed:!0,detail:{source:e,selected:t}});this.dispatchEvent(n)}}},{key:"connectedCallback",value:function(){w(k(c.prototype),"connectedCallback",this).call(this),this.noninteractive||this.setAttribute("mwc-list-item","");var t,e=l(this.listeners);try{for(e.s();!(t=e.n()).done;){var n,i=t.value,r=l(i.eventNames);try{for(r.s();!(n=r.n()).done;){var o=n.value;i.target.addEventListener(o,i.cb,{passive:!0})}}catch(a){r.e(a)}finally{r.f()}}}catch(a){e.e(a)}finally{e.f()}}},{key:"disconnectedCallback",value:function(){w(k(c.prototype),"disconnectedCallback",this).call(this);var t,e=l(this.listeners);try{for(e.s();!(t=e.n()).done;){var n,i=t.value,r=l(i.eventNames);try{for(r.s();!(n=r.n()).done;){var o=n.value;i.target.removeEventListener(o,i.cb)}}catch(a){r.e(a)}finally{r.f()}}}catch(a){e.e(a)}finally{e.f()}this._managingList&&(this._managingList.debouncedLayout?this._managingList.debouncedLayout(!0):this._managingList.layout(!0))}},{key:"firstUpdated",value:function(){var t=new Event("list-item-rendered",{bubbles:!0,composed:!0});this.dispatchEvent(t)}},{key:"text",get:function(){var t=this.textContent;return t?t.trim():""}}])&&S(e.prototype,n),i&&S(e,i),c}(r.oi);function C(){var t=function(t,e){e||(e=t.slice(0));return Object.freeze(Object.defineProperties(t,{raw:{value:Object.freeze(e)}}))}([':host{cursor:pointer;user-select:none;-webkit-tap-highlight-color:transparent;height:48px;display:flex;position:relative;align-items:center;justify-content:flex-start;overflow:hidden;padding:0;padding-left:var(--mdc-list-side-padding, 16px);padding-right:var(--mdc-list-side-padding, 16px);outline:none;height:48px;color:rgba(0,0,0,.87);color:var(--mdc-theme-text-primary-on-background, rgba(0, 0, 0, 0.87))}:host:focus{outline:none}:host([activated]){color:#6200ee;color:var(--mdc-theme-primary, #6200ee);--mdc-ripple-color: var(--mdc-theme-primary, #6200ee)}:host([activated]) .mdc-list-item__graphic{color:#6200ee;color:var(--mdc-theme-primary, #6200ee)}:host([activated]) .fake-activated-ripple::before{position:absolute;display:block;top:0;bottom:0;left:0;right:0;width:100%;height:100%;pointer-events:none;z-index:1;content:"";opacity:0.12;opacity:var(--mdc-ripple-activated-opacity, 0.12);background-color:#6200ee;background-color:var(--mdc-ripple-color, var(--mdc-theme-primary, #6200ee))}.mdc-list-item__graphic{flex-shrink:0;align-items:center;justify-content:center;fill:currentColor;display:inline-flex}.mdc-list-item__graphic ::slotted(*){flex-shrink:0;align-items:center;justify-content:center;fill:currentColor;width:100%;height:100%;text-align:center}.mdc-list-item__meta{width:var(--mdc-list-item-meta-size, 24px);height:var(--mdc-list-item-meta-size, 24px);margin-left:auto;margin-right:0;color:rgba(0, 0, 0, 
0.38);color:var(--mdc-theme-text-hint-on-background, rgba(0, 0, 0, 0.38))}.mdc-list-item__meta.multi{width:auto}.mdc-list-item__meta ::slotted(*){width:var(--mdc-list-item-meta-size, 24px);line-height:var(--mdc-list-item-meta-size, 24px)}.mdc-list-item__meta ::slotted(.material-icons),.mdc-list-item__meta ::slotted(mwc-icon){line-height:var(--mdc-list-item-meta-size, 24px) !important}.mdc-list-item__meta ::slotted(:not(.material-icons):not(mwc-icon)){-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;font-family:Roboto, sans-serif;font-family:var(--mdc-typography-caption-font-family, var(--mdc-typography-font-family, Roboto, sans-serif));font-size:0.75rem;font-size:var(--mdc-typography-caption-font-size, 0.75rem);line-height:1.25rem;line-height:var(--mdc-typography-caption-line-height, 1.25rem);font-weight:400;font-weight:var(--mdc-typography-caption-font-weight, 400);letter-spacing:0.0333333333em;letter-spacing:var(--mdc-typography-caption-letter-spacing, 0.0333333333em);text-decoration:inherit;text-decoration:var(--mdc-typography-caption-text-decoration, inherit);text-transform:inherit;text-transform:var(--mdc-typography-caption-text-transform, inherit)}:host[dir=rtl] .mdc-list-item__meta,[dir=rtl] :host .mdc-list-item__meta{margin-left:0;margin-right:auto}.mdc-list-item__meta ::slotted(*){width:100%;height:100%}.mdc-list-item__text{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.mdc-list-item__text ::slotted([for]),.mdc-list-item__text[for]{pointer-events:none}.mdc-list-item__primary-text{text-overflow:ellipsis;white-space:nowrap;overflow:hidden;display:block;margin-top:0;line-height:normal;margin-bottom:-20px;display:block}.mdc-list-item__primary-text::before{display:inline-block;width:0;height:32px;content:"";vertical-align:0}.mdc-list-item__primary-text::after{display:inline-block;width:0;height:20px;content:"";vertical-align:-20px}.mdc-list-item__secondary-text{-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;font-family:Roboto, sans-serif;font-family:var(--mdc-typography-body2-font-family, var(--mdc-typography-font-family, Roboto, sans-serif));font-size:0.875rem;font-size:var(--mdc-typography-body2-font-size, 0.875rem);line-height:1.25rem;line-height:var(--mdc-typography-body2-line-height, 1.25rem);font-weight:400;font-weight:var(--mdc-typography-body2-font-weight, 400);letter-spacing:0.0178571429em;letter-spacing:var(--mdc-typography-body2-letter-spacing, 0.0178571429em);text-decoration:inherit;text-decoration:var(--mdc-typography-body2-text-decoration, inherit);text-transform:inherit;text-transform:var(--mdc-typography-body2-text-transform, inherit);text-overflow:ellipsis;white-space:nowrap;overflow:hidden;display:block;margin-top:0;line-height:normal;display:block}.mdc-list-item__secondary-text::before{display:inline-block;width:0;height:20px;content:"";vertical-align:0}.mdc-list--dense .mdc-list-item__secondary-text{font-size:inherit}* ::slotted(a),a{color:inherit;text-decoration:none}:host([twoline]){height:72px}:host([twoline]) .mdc-list-item__text{align-self:flex-start}:host([disabled]),:host([noninteractive]){cursor:default;pointer-events:none}:host([disabled]) .mdc-list-item__text ::slotted(*){opacity:.38}:host([disabled]) .mdc-list-item__text ::slotted(*),:host([disabled]) .mdc-list-item__primary-text ::slotted(*),:host([disabled]) .mdc-list-item__secondary-text ::slotted(*){color:#000;color:var(--mdc-theme-on-surface, #000)}.mdc-list-item__secondary-text ::slotted(*){color:rgba(0, 0, 0, 
0.54);color:var(--mdc-theme-text-secondary-on-background, rgba(0, 0, 0, 0.54))}.mdc-list-item__graphic ::slotted(*){background-color:transparent;color:rgba(0, 0, 0, 0.38);color:var(--mdc-theme-text-icon-on-background, rgba(0, 0, 0, 0.38))}.mdc-list-group__subheader ::slotted(*){color:rgba(0, 0, 0, 0.87);color:var(--mdc-theme-text-primary-on-background, rgba(0, 0, 0, 0.87))}:host([graphic=avatar]) .mdc-list-item__graphic{width:var(--mdc-list-item-graphic-size, 40px);height:var(--mdc-list-item-graphic-size, 40px)}:host([graphic=avatar]) .mdc-list-item__graphic.multi{width:auto}:host([graphic=avatar]) .mdc-list-item__graphic ::slotted(*){width:var(--mdc-list-item-graphic-size, 40px);line-height:var(--mdc-list-item-graphic-size, 40px)}:host([graphic=avatar]) .mdc-list-item__graphic ::slotted(.material-icons),:host([graphic=avatar]) .mdc-list-item__graphic ::slotted(mwc-icon){line-height:var(--mdc-list-item-graphic-size, 40px) !important}:host([graphic=avatar]) .mdc-list-item__graphic ::slotted(*){border-radius:50%}:host([graphic=avatar],[graphic=medium],[graphic=large],[graphic=control]) .mdc-list-item__graphic{margin-left:0;margin-right:var(--mdc-list-item-graphic-margin, 16px)}:host[dir=rtl] :host([graphic=avatar],[graphic=medium],[graphic=large],[graphic=control]) .mdc-list-item__graphic,[dir=rtl] :host :host([graphic=avatar],[graphic=medium],[graphic=large],[graphic=control]) .mdc-list-item__graphic{margin-left:var(--mdc-list-item-graphic-margin, 16px);margin-right:0}:host([graphic=icon]) .mdc-list-item__graphic{width:var(--mdc-list-item-graphic-size, 24px);height:var(--mdc-list-item-graphic-size, 24px);margin-left:0;margin-right:var(--mdc-list-item-graphic-margin, 32px)}:host([graphic=icon]) .mdc-list-item__graphic.multi{width:auto}:host([graphic=icon]) .mdc-list-item__graphic ::slotted(*){width:var(--mdc-list-item-graphic-size, 24px);line-height:var(--mdc-list-item-graphic-size, 24px)}:host([graphic=icon]) .mdc-list-item__graphic ::slotted(.material-icons),:host([graphic=icon]) .mdc-list-item__graphic ::slotted(mwc-icon){line-height:var(--mdc-list-item-graphic-size, 24px) !important}:host[dir=rtl] :host([graphic=icon]) .mdc-list-item__graphic,[dir=rtl] :host :host([graphic=icon]) .mdc-list-item__graphic{margin-left:var(--mdc-list-item-graphic-margin, 32px);margin-right:0}:host([graphic=avatar]:not([twoLine])),:host([graphic=icon]:not([twoLine])){height:56px}:host([graphic=medium]:not([twoLine])),:host([graphic=large]:not([twoLine])){height:72px}:host([graphic=medium]) .mdc-list-item__graphic,:host([graphic=large]) .mdc-list-item__graphic{width:var(--mdc-list-item-graphic-size, 56px);height:var(--mdc-list-item-graphic-size, 56px)}:host([graphic=medium]) .mdc-list-item__graphic.multi,:host([graphic=large]) .mdc-list-item__graphic.multi{width:auto}:host([graphic=medium]) .mdc-list-item__graphic ::slotted(*),:host([graphic=large]) .mdc-list-item__graphic ::slotted(*){width:var(--mdc-list-item-graphic-size, 56px);line-height:var(--mdc-list-item-graphic-size, 56px)}:host([graphic=medium]) .mdc-list-item__graphic ::slotted(.material-icons),:host([graphic=medium]) .mdc-list-item__graphic ::slotted(mwc-icon),:host([graphic=large]) .mdc-list-item__graphic ::slotted(.material-icons),:host([graphic=large]) .mdc-list-item__graphic ::slotted(mwc-icon){line-height:var(--mdc-list-item-graphic-size, 56px) !important}:host([graphic=large]){padding-left:0px}']);return C=function(){return t},t}(0,i.gn)([(0,r.IO)("slot")],O.prototype,"slotElement",void 
0),(0,i.gn)([(0,r.GC)("mwc-ripple")],O.prototype,"ripple",void 0),(0,i.gn)([(0,r.Cb)({type:String})],O.prototype,"value",void 0),(0,i.gn)([(0,r.Cb)({type:String,reflect:!0})],O.prototype,"group",void 0),(0,i.gn)([(0,r.Cb)({type:Number,reflect:!0})],O.prototype,"tabindex",void 0),(0,i.gn)([(0,r.Cb)({type:Boolean,reflect:!0}),(0,o.P)((function(t){t?this.setAttribute("aria-disabled","true"):this.setAttribute("aria-disabled","false")}))],O.prototype,"disabled",void 0),(0,i.gn)([(0,r.Cb)({type:Boolean,reflect:!0})],O.prototype,"twoline",void 0),(0,i.gn)([(0,r.Cb)({type:Boolean,reflect:!0})],O.prototype,"activated",void 0),(0,i.gn)([(0,r.Cb)({type:String,reflect:!0})],O.prototype,"graphic",void 0),(0,i.gn)([(0,r.Cb)({type:Boolean})],O.prototype,"multipleGraphics",void 0),(0,i.gn)([(0,r.Cb)({type:Boolean})],O.prototype,"hasMeta",void 0),(0,i.gn)([(0,r.Cb)({type:Boolean,reflect:!0}),(0,o.P)((function(t){t?(this.removeAttribute("aria-checked"),this.removeAttribute("mwc-list-item"),this.selected=!1,this.activated=!1,this.tabIndex=-1):this.setAttribute("mwc-list-item","")}))],O.prototype,"noninteractive",void 0),(0,i.gn)([(0,r.Cb)({type:Boolean,reflect:!0}),(0,o.P)((function(t){var e=this.getAttribute("role"),n="gridcell"===e||"option"===e||"row"===e||"tab"===e;n&&t?this.setAttribute("aria-selected","true"):n&&this.setAttribute("aria-selected","false"),this._firstChanged?this._firstChanged=!1:this._skipPropRequest||this.fireRequestSelected(t,"property")}))],O.prototype,"selected",void 0),(0,i.gn)([(0,r.sz)()],O.prototype,"shouldRenderRipple",void 0),(0,i.gn)([(0,r.sz)()],O.prototype,"_managingList",void 0);var F=(0,r.iv)(C());function L(t){return(L="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function j(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}function D(t,e){return(D=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t})(t,e)}function P(t){var e=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],(function(){}))),!0}catch(t){return!1}}();return function(){var n,i=z(t);if(e){var r=z(this).constructor;n=Reflect.construct(i,arguments,r)}else n=i.apply(this,arguments);return N(this,n)}}function N(t,e){return!e||"object"!==L(e)&&"function"!=typeof e?function(t){if(void 0===t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return t}(t):e}function z(t){return(z=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)})(t)}var M=function(t){!function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&D(t,e)}(n,t);var e=P(n);function n(){return j(this,n),e.apply(this,arguments)}return n}(O);M.styles=F,M=(0,i.gn)([(0,r.Mo)("mwc-list-item")],M)},36051:function(t,e,n){"use strict";var i=n(36220),r=n(15652),o=(n(81689),n(78220)),a=n(14114),s=n(82612),c=n(49629),l=n(72774),d="Unknown",u="Backspace",f="Enter",p="Spacebar",m="PageUp",h="PageDown",y="End",v="Home",g="ArrowLeft",b="ArrowUp",_="ArrowRight",x="ArrowDown",I="Delete",S="Escape",w=new 
Set;w.add(u),w.add(f),w.add(p),w.add(m),w.add(h),w.add(y),w.add(v),w.add(g),w.add(b),w.add(_),w.add(x),w.add(I),w.add(S);var E=8,A=13,R=32,T=33,k=34,O=35,C=36,F=37,L=38,j=39,D=40,P=46,N=27,z=new Map;z.set(E,u),z.set(A,f),z.set(R,p),z.set(T,m),z.set(k,h),z.set(O,y),z.set(C,v),z.set(F,g),z.set(L,b),z.set(j,_),z.set(D,x),z.set(P,I),z.set(N,S);var M=new Set;function U(t){var e=t.key;if(w.has(e))return e;var n=z.get(t.keyCode);return n||d}M.add(m),M.add(h),M.add(y),M.add(v),M.add(g),M.add(b),M.add(_),M.add(x);var K=n(74015);function H(t){return(H="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function B(t){return function(t){if(Array.isArray(t))return q(t)}(t)||function(t){if("undefined"!=typeof Symbol&&Symbol.iterator in Object(t))return Array.from(t)}(t)||V(t)||function(){throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function X(t,e){var n;if("undefined"==typeof Symbol||null==t[Symbol.iterator]){if(Array.isArray(t)||(n=V(t))||e&&t&&"number"==typeof t.length){n&&(t=n);var i=0,r=function(){};return{s:r,n:function(){return i>=t.length?{done:!0}:{done:!1,value:t[i++]}},e:function(t){throw t},f:r}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var o,a=!0,s=!1;return{s:function(){n=t[Symbol.iterator]()},n:function(){var t=n.next();return a=t.done,t},e:function(t){s=!0,o=t},f:function(){try{a||null==n.return||n.return()}finally{if(s)throw o}}}}function V(t,e){if(t){if("string"==typeof t)return q(t,e);var n=Object.prototype.toString.call(t).slice(8,-1);return"Object"===n&&t.constructor&&(n=t.constructor.name),"Map"===n||"Set"===n?Array.from(t):"Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)?q(t,e):void 0}}function q(t,e){(null==e||e>t.length)&&(e=t.length);for(var n=0,i=new Array(e);n<e;n++)i[n]=t[n];return i}function G(t,e){for(var n=0;n<e.length;n++){var i=e[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(t,i.key,i)}}function $(t,e){return($=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t})(t,e)}function W(t){var e=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],(function(){}))),!0}catch(t){return!1}}();return function(){var n,i=Z(t);if(e){var r=Z(this).constructor;n=Reflect.construct(i,arguments,r)}else n=i.apply(this,arguments);return Y(this,n)}}function Y(t,e){return!e||"object"!==H(e)&&"function"!=typeof e?function(t){if(void 0===t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return t}(t):e}function Z(t){return(Z=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)})(t)}var J=function(t,e){for(var n=Array.from(t),i=Array.from(e),r={added:[],removed:[]},o=n.sort(),a=i.sort(),s=0,c=0;s<o.length||c<a.length;){var l=o[s],d=a[c];l!==d?void 0!==l&&(void 0===d||l<d)?(r.removed.push(l),s++):void 0!==d&&(void 0===l||d<l)&&(r.added.push(d),c++):(s++,c++)}return r},Q=["input","button","textarea","select"];function tt(t){return t instanceof Set}var et=function(t){var e=t===K.KT.UNSET_INDEX?new 
Set:t;return tt(e)?new Set(e):new Set([e])},nt=function(t){!function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&$(t,e)}(o,t);var e,n,i,r=W(o);function o(t){var e;return function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,o),(e=r.call(this,Object.assign(Object.assign({},o.defaultAdapter),t))).isMulti_=!1,e.wrapFocus_=!1,e.isVertical_=!0,e.selectedIndex_=K.KT.UNSET_INDEX,e.focusedItemIndex_=K.KT.UNSET_INDEX,e.useActivatedClass_=!1,e.ariaCurrentAttrValue_=null,e}return e=o,i=[{key:"strings",get:function(){return K.j2}},{key:"numbers",get:function(){return K.KT}},{key:"defaultAdapter",get:function(){return{focusItemAtIndex:function(){},getFocusedElementIndex:function(){return 0},getListItemCount:function(){return 0},isFocusInsideList:function(){return!1},isRootFocused:function(){return!1},notifyAction:function(){},notifySelected:function(){},getSelectedStateForElementIndex:function(){return!1},setDisabledStateForElementIndex:function(){},getDisabledStateForElementIndex:function(){return!1},setSelectedStateForElementIndex:function(){},setActivatedStateForElementIndex:function(){},setTabIndexForElementIndex:function(){},setAttributeForElementIndex:function(){},getAttributeForElementIndex:function(){return null}}}}],(n=[{key:"setWrapFocus",value:function(t){this.wrapFocus_=t}},{key:"setMulti",value:function(t){this.isMulti_=t;var e=this.selectedIndex_;if(t){if(!tt(e)){var n=e===K.KT.UNSET_INDEX;this.selectedIndex_=n?new Set:new Set([e])}}else if(tt(e))if(e.size){var i=Array.from(e).sort();this.selectedIndex_=i[0]}else this.selectedIndex_=K.KT.UNSET_INDEX}},{key:"setVerticalOrientation",value:function(t){this.isVertical_=t}},{key:"setUseActivatedClass",value:function(t){this.useActivatedClass_=t}},{key:"getSelectedIndex",value:function(){return this.selectedIndex_}},{key:"setSelectedIndex",value:function(t){this.isIndexValid_(t)&&(this.isMulti_?this.setMultiSelectionAtIndex_(et(t)):this.setSingleSelectionAtIndex_(t))}},{key:"handleFocusIn",value:function(t,e){e>=0&&this.adapter.setTabIndexForElementIndex(e,0)}},{key:"handleFocusOut",value:function(t,e){var n=this;e>=0&&this.adapter.setTabIndexForElementIndex(e,-1),setTimeout((function(){n.adapter.isFocusInsideList()||n.setTabindexToFirstSelectedItem_()}),0)}},{key:"handleKeydown",value:function(t,e,n){var i="ArrowLeft"===U(t),r="ArrowUp"===U(t),o="ArrowRight"===U(t),a="ArrowDown"===U(t),s="Home"===U(t),c="End"===U(t),l="Enter"===U(t),d="Spacebar"===U(t);if(this.adapter.isRootFocused())r||c?(t.preventDefault(),this.focusLastElement()):(a||s)&&(t.preventDefault(),this.focusFirstElement());else{var u=this.adapter.getFocusedElementIndex();if(!(-1===u&&(u=n)<0)){var f;if(this.isVertical_&&a||!this.isVertical_&&o)this.preventDefaultEvent(t),f=this.focusNextElement(u);else if(this.isVertical_&&r||!this.isVertical_&&i)this.preventDefaultEvent(t),f=this.focusPrevElement(u);else if(s)this.preventDefaultEvent(t),f=this.focusFirstElement();else if(c)this.preventDefaultEvent(t),f=this.focusLastElement();else if((l||d)&&e){var p=t.target;if(p&&"A"===p.tagName&&l)return;this.preventDefaultEvent(t),this.setSelectedIndexOnAction_(u,!0)}this.focusedItemIndex_=u,void 
0!==f&&(this.setTabindexAtIndex_(f),this.focusedItemIndex_=f)}}}},{key:"handleSingleSelection",value:function(t,e,n){t!==K.KT.UNSET_INDEX&&(this.setSelectedIndexOnAction_(t,e,n),this.setTabindexAtIndex_(t),this.focusedItemIndex_=t)}},{key:"focusNextElement",value:function(t){var e=t+1;if(e>=this.adapter.getListItemCount()){if(!this.wrapFocus_)return t;e=0}return this.adapter.focusItemAtIndex(e),e}},{key:"focusPrevElement",value:function(t){var e=t-1;if(e<0){if(!this.wrapFocus_)return t;e=this.adapter.getListItemCount()-1}return this.adapter.focusItemAtIndex(e),e}},{key:"focusFirstElement",value:function(){return this.adapter.focusItemAtIndex(0),0}},{key:"focusLastElement",value:function(){var t=this.adapter.getListItemCount()-1;return this.adapter.focusItemAtIndex(t),t}},{key:"setEnabled",value:function(t,e){this.isIndexValid_(t)&&this.adapter.setDisabledStateForElementIndex(t,!e)}},{key:"preventDefaultEvent",value:function(t){var e=t.target,n="".concat(e.tagName).toLowerCase();-1===Q.indexOf(n)&&t.preventDefault()}},{key:"setSingleSelectionAtIndex_",value:function(t){var e=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];this.selectedIndex_!==t&&(this.selectedIndex_!==K.KT.UNSET_INDEX&&(this.adapter.setSelectedStateForElementIndex(this.selectedIndex_,!1),this.useActivatedClass_&&this.adapter.setActivatedStateForElementIndex(this.selectedIndex_,!1)),e&&this.adapter.setSelectedStateForElementIndex(t,!0),this.useActivatedClass_&&this.adapter.setActivatedStateForElementIndex(t,!0),this.setAriaForSingleSelectionAtIndex_(t),this.selectedIndex_=t,this.adapter.notifySelected(t))}},{key:"setMultiSelectionAtIndex_",value:function(t){var e=!(arguments.length>1&&void 0!==arguments[1])||arguments[1],n=et(this.selectedIndex_),i=J(n,t);if(i.removed.length||i.added.length){var r,o=X(i.removed);try{for(o.s();!(r=o.n()).done;){var a=r.value;e&&this.adapter.setSelectedStateForElementIndex(a,!1),this.useActivatedClass_&&this.adapter.setActivatedStateForElementIndex(a,!1)}}catch(d){o.e(d)}finally{o.f()}var s,c=X(i.added);try{for(c.s();!(s=c.n()).done;){var l=s.value;e&&this.adapter.setSelectedStateForElementIndex(l,!0),this.useActivatedClass_&&this.adapter.setActivatedStateForElementIndex(l,!0)}}catch(d){c.e(d)}finally{c.f()}this.selectedIndex_=t,this.adapter.notifySelected(t,i)}}},{key:"setAriaForSingleSelectionAtIndex_",value:function(t){this.selectedIndex_===K.KT.UNSET_INDEX&&(this.ariaCurrentAttrValue_=this.adapter.getAttributeForElementIndex(t,K.j2.ARIA_CURRENT));var e=null!==this.ariaCurrentAttrValue_,n=e?K.j2.ARIA_CURRENT:K.j2.ARIA_SELECTED;this.selectedIndex_!==K.KT.UNSET_INDEX&&this.adapter.setAttributeForElementIndex(this.selectedIndex_,n,"false");var i=e?this.ariaCurrentAttrValue_:"true";this.adapter.setAttributeForElementIndex(t,n,i)}},{key:"setTabindexAtIndex_",value:function(t){this.focusedItemIndex_===K.KT.UNSET_INDEX&&0!==t?this.adapter.setTabIndexForElementIndex(0,-1):this.focusedItemIndex_>=0&&this.focusedItemIndex_!==t&&this.adapter.setTabIndexForElementIndex(this.focusedItemIndex_,-1),this.adapter.setTabIndexForElementIndex(t,0)}},{key:"setTabindexToFirstSelectedItem_",value:function(){var t=0;"number"==typeof this.selectedIndex_&&this.selectedIndex_!==K.KT.UNSET_INDEX?t=this.selectedIndex_:tt(this.selectedIndex_)&&this.selectedIndex_.size>0&&(t=Math.min.apply(Math,B(this.selectedIndex_))),this.setTabindexAtIndex_(t)}},{key:"isIndexValid_",value:function(t){if(t instanceof Set){if(!this.isMulti_)throw new Error("MDCListFoundation: Array of index is only supported for checkbox 
based list");if(0===t.size)return!0;var e,n=!1,i=X(t);try{for(i.s();!(e=i.n()).done;){var r=e.value;if(n=this.isIndexInRange_(r))break}}catch(o){i.e(o)}finally{i.f()}return n}if("number"==typeof t){if(this.isMulti_)throw new Error("MDCListFoundation: Expected array of index for checkbox based list but got number: "+t);return t===K.KT.UNSET_INDEX||this.isIndexInRange_(t)}return!1}},{key:"isIndexInRange_",value:function(t){var e=this.adapter.getListItemCount();return t>=0&&t<e}},{key:"setSelectedIndexOnAction_",value:function(t,e,n){if(!this.adapter.getDisabledStateForElementIndex(t)){var i=t;this.isMulti_&&(i=new Set([t])),this.isIndexValid_(i)&&(this.isMulti_?this.toggleMultiAtIndex(t,n,e):e||n?this.setSingleSelectionAtIndex_(t,e):this.selectedIndex_===t&&this.setSingleSelectionAtIndex_(K.KT.UNSET_INDEX),e&&this.adapter.notifyAction(t))}}},{key:"toggleMultiAtIndex",value:function(t,e){var n=!(arguments.length>2&&void 0!==arguments[2])||arguments[2],i=!1;i=void 0===e?!this.adapter.getSelectedStateForElementIndex(t):e;var r=et(this.selectedIndex_);i?r.add(t):r.delete(t),this.setMultiSelectionAtIndex_(r,n)}}])&&G(e.prototype,n),i&&G(e,i),o}(l.K);function it(t){return(it="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function rt(){var t=at(["\n <mwc-list-item noninteractive>","</mwc-list-item>\n "]);return rt=function(){return t},t}function ot(){var t=at(["\n \x3c!-- @ts-ignore --\x3e\n <ul\n tabindex=",'\n role="','"\n aria-label="','"\n class="mdc-list"\n @keydown=',"\n @focusin=","\n @focusout=","\n @request-selected=","\n @list-item-rendered=",">\n <slot></slot>\n ","\n </ul>\n "]);return ot=function(){return t},t}function at(t,e){return e||(e=t.slice(0)),Object.freeze(Object.defineProperties(t,{raw:{value:Object.freeze(e)}}))}function st(t,e){var n;if("undefined"==typeof Symbol||null==t[Symbol.iterator]){if(Array.isArray(t)||(n=function(t,e){if(!t)return;if("string"==typeof t)return ct(t,e);var n=Object.prototype.toString.call(t).slice(8,-1);"Object"===n&&t.constructor&&(n=t.constructor.name);if("Map"===n||"Set"===n)return Array.from(t);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return ct(t,e)}(t))||e&&t&&"number"==typeof t.length){n&&(t=n);var i=0,r=function(){};return{s:r,n:function(){return i>=t.length?{done:!0}:{done:!1,value:t[i++]}},e:function(t){throw t},f:r}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var o,a=!0,s=!1;return{s:function(){n=t[Symbol.iterator]()},n:function(){var t=n.next();return a=t.done,t},e:function(t){s=!0,o=t},f:function(){try{a||null==n.return||n.return()}finally{if(s)throw o}}}}function ct(t,e){(null==e||e>t.length)&&(e=t.length);for(var n=0,i=new Array(e);n<e;n++)i[n]=t[n];return i}function lt(t,e,n,i,r,o,a){try{var s=t[o](a),c=s.value}catch(l){return void n(l)}s.done?e(c):Promise.resolve(c).then(i,r)}function dt(t,e){for(var n=0;n<e.length;n++){var i=e[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(t,i.key,i)}}function ut(t,e,n){return(ut="undefined"!=typeof Reflect&&Reflect.get?Reflect.get:function(t,e,n){var i=function(t,e){for(;!Object.prototype.hasOwnProperty.call(t,e)&&null!==(t=yt(t)););return t}(t,e);if(i){var r=Object.getOwnPropertyDescriptor(i,e);return 
r.get?r.get.call(n):r.value}})(t,e,n||t)}function ft(t,e){return(ft=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t})(t,e)}function pt(t){var e=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],(function(){}))),!0}catch(t){return!1}}();return function(){var n,i=yt(t);if(e){var r=yt(this).constructor;n=Reflect.construct(i,arguments,r)}else n=i.apply(this,arguments);return mt(this,n)}}function mt(t,e){return!e||"object"!==it(e)&&"function"!=typeof e?ht(t):e}function ht(t){if(void 0===t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return t}function yt(t){return(yt=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)})(t)}var vt=function(t){return t.hasAttribute("mwc-list-item")};function gt(){var t=this,e=this.itemsReadyResolver;this.itemsReady=new Promise((function(e){return t.itemsReadyResolver=e})),e()}var bt=function(t){!function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&ft(t,e)}(d,t);var e,n,i,o,a,l=pt(d);function d(){var t;!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,d),(t=l.call(this)).mdcAdapter=null,t.mdcFoundationClass=nt,t.activatable=!1,t.multi=!1,t.wrapFocus=!1,t.itemRoles=null,t.innerRole=null,t.innerAriaLabel=null,t.rootTabbable=!1,t.previousTabindex=null,t.noninteractive=!1,t.itemsReadyResolver=function(){},t.itemsReady=Promise.resolve([]),t.items_=[];var e=function(t){var e,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:50;return function(){var i=!(arguments.length>0&&void 0!==arguments[0])||arguments[0];clearTimeout(e),e=setTimeout((function(){t(i)}),n)}}(t.layout.bind(ht(t)));return t.debouncedLayout=function(){var n=!(arguments.length>0&&void 0!==arguments[0])||arguments[0];gt.call(ht(t)),e(n)},t}return e=d,(n=[{key:"_getUpdateComplete",value:(o=regeneratorRuntime.mark((function t(){return regeneratorRuntime.wrap((function(t){for(;;)switch(t.prev=t.next){case 0:return t.next=2,ut(yt(d.prototype),"_getUpdateComplete",this).call(this);case 2:return t.next=4,this.itemsReady;case 4:case"end":return t.stop()}}),t,this)})),a=function(){var t=this,e=arguments;return new Promise((function(n,i){var r=o.apply(t,e);function a(t){lt(r,n,i,a,s,"next",t)}function s(t){lt(r,n,i,a,s,"throw",t)}a(void 0)}))},function(){return a.apply(this,arguments)})},{key:"updateItems",value:function(){var t,e=this,n=[],i=st(this.assignedElements);try{for(i.s();!(t=i.n()).done;){var r=t.value;vt(r)&&(n.push(r),r._managingList=this),r.hasAttribute("divider")&&!r.hasAttribute("role")&&r.setAttribute("role","separator")}}catch(c){i.e(c)}finally{i.f()}this.items_=n;var o=new Set;if(this.items_.forEach((function(t,n){e.itemRoles?t.setAttribute("role",e.itemRoles):t.removeAttribute("role"),t.selected&&o.add(n)})),this.multi)this.select(o);else{var a=o.size?o.entries().next().value[1]:-1;this.select(a)}var s=new Event("items-updated",{bubbles:!0,composed:!0});this.dispatchEvent(s)}},{key:"render",value:function(){var t=null===this.innerRole?void 0:this.innerRole,e=null===this.innerAriaLabel?void 
0:this.innerAriaLabel,n=this.rootTabbable?"0":"-1";return(0,r.dy)(ot(),n,(0,c.o)(t),(0,c.o)(e),this.onKeydown,this.onFocusIn,this.onFocusOut,this.onRequestSelected,this.onListItemConnected,this.renderPlaceholder())}},{key:"renderPlaceholder",value:function(){return void 0!==this.emptyMessage&&0===this.assignedElements.length?(0,r.dy)(rt(),this.emptyMessage):null}},{key:"firstUpdated",value:function(){ut(yt(d.prototype),"firstUpdated",this).call(this),this.items.length||(this.mdcFoundation.setMulti(this.multi),this.layout())}},{key:"onFocusIn",value:function(t){if(this.mdcFoundation&&this.mdcRoot){var e=this.getIndexOfTarget(t);this.mdcFoundation.handleFocusIn(t,e)}}},{key:"onFocusOut",value:function(t){if(this.mdcFoundation&&this.mdcRoot){var e=this.getIndexOfTarget(t);this.mdcFoundation.handleFocusOut(t,e)}}},{key:"onKeydown",value:function(t){if(this.mdcFoundation&&this.mdcRoot){var e=this.getIndexOfTarget(t),n=t.target,i=vt(n);this.mdcFoundation.handleKeydown(t,i,e)}}},{key:"onRequestSelected",value:function(t){if(this.mdcFoundation){var e=this.getIndexOfTarget(t);if(-1===e&&(this.layout(),-1===(e=this.getIndexOfTarget(t))))return;if(this.items[e].disabled)return;var n=t.detail.selected,i=t.detail.source;this.mdcFoundation.handleSingleSelection(e,"interaction"===i,n),t.stopPropagation()}}},{key:"getIndexOfTarget",value:function(t){var e,n=this.items,i=st(t.composedPath());try{for(i.s();!(e=i.n()).done;){var r=e.value,o=-1;if((0,s.OE)(r)&&vt(r)&&(o=n.indexOf(r)),-1!==o)return o}}catch(a){i.e(a)}finally{i.f()}return-1}},{key:"createAdapter",value:function(){var t=this;return this.mdcAdapter={getListItemCount:function(){return t.mdcRoot?t.items.length:0},getFocusedElementIndex:this.getFocusedItemIndex,getAttributeForElementIndex:function(e,n){if(!t.mdcRoot)return"";var i=t.items[e];return i?i.getAttribute(n):""},setAttributeForElementIndex:function(e,n,i){if(t.mdcRoot){var r=t.items[e];r&&r.setAttribute(n,i)}},focusItemAtIndex:function(e){var n=t.items[e];n&&n.focus()},setTabIndexForElementIndex:function(e,n){var i=t.items[e];i&&(i.tabindex=n)},notifyAction:function(e){var n={bubbles:!0,composed:!0};n.detail={index:e};var i=new CustomEvent("action",n);t.dispatchEvent(i)},notifySelected:function(e,n){var i={bubbles:!0,composed:!0};i.detail={index:e,diff:n};var r=new CustomEvent("selected",i);t.dispatchEvent(r)},isFocusInsideList:function(){return(0,s.WU)(t)},isRootFocused:function(){var e=t.mdcRoot;return e.getRootNode().activeElement===e},setDisabledStateForElementIndex:function(e,n){var i=t.items[e];i&&(i.disabled=n)},getDisabledStateForElementIndex:function(e){var n=t.items[e];return!!n&&n.disabled},setSelectedStateForElementIndex:function(e,n){var i=t.items[e];i&&(i.selected=n)},getSelectedStateForElementIndex:function(e){var n=t.items[e];return!!n&&n.selected},setActivatedStateForElementIndex:function(e,n){var i=t.items[e];i&&(i.activated=n)}},this.mdcAdapter}},{key:"selectUi",value:function(t){var e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n=this.items[t];n&&(n.selected=!0,n.activated=e)}},{key:"deselectUi",value:function(t){var e=this.items[t];e&&(e.selected=!1,e.activated=!1)}},{key:"select",value:function(t){this.mdcFoundation&&this.mdcFoundation.setSelectedIndex(t)}},{key:"toggle",value:function(t,e){this.multi&&this.mdcFoundation.toggleMultiAtIndex(t,e)}},{key:"onListItemConnected",value:function(t){var e=t.target;this.layout(-1===this.items.indexOf(e))}},{key:"layout",value:function(){var t=!(arguments.length>0&&void 
0!==arguments[0])||arguments[0];t&&this.updateItems();var e,n=this.items[0],i=st(this.items);try{for(i.s();!(e=i.n()).done;){var r=e.value;r.tabindex=-1}}catch(o){i.e(o)}finally{i.f()}n&&(this.noninteractive?this.previousTabindex||(this.previousTabindex=n):n.tabindex=0),this.itemsReadyResolver()}},{key:"getFocusedItemIndex",value:function(){if(!this.mdcRoot)return-1;if(!this.items.length)return-1;var t=(0,s.Mh)();if(!t.length)return-1;for(var e=t.length-1;e>=0;e--){var n=t[e];if(vt(n))return this.items.indexOf(n)}return-1}},{key:"focusItemAtIndex",value:function(t){var e,n=st(this.items);try{for(n.s();!(e=n.n()).done;){var i=e.value;if(0===i.tabindex){i.tabindex=-1;break}}}catch(r){n.e(r)}finally{n.f()}this.items[t].tabindex=0,this.items[t].focus()}},{key:"focus",value:function(){var t=this.mdcRoot;t&&t.focus()}},{key:"blur",value:function(){var t=this.mdcRoot;t&&t.blur()}},{key:"assignedElements",get:function(){var t=this.slotElement;return t?t.assignedNodes({flatten:!0}).filter(s.OE):[]}},{key:"items",get:function(){return this.items_}},{key:"selected",get:function(){var t=this.index;if(!tt(t))return-1===t?null:this.items[t];var e,n=[],i=st(t);try{for(i.s();!(e=i.n()).done;){var r=e.value;n.push(this.items[r])}}catch(o){i.e(o)}finally{i.f()}return n}},{key:"index",get:function(){return this.mdcFoundation?this.mdcFoundation.getSelectedIndex():-1}}])&&dt(e.prototype,n),i&&dt(e,i),d}(o.H);function _t(){var t=function(t,e){e||(e=t.slice(0));return Object.freeze(Object.defineProperties(t,{raw:{value:Object.freeze(e)}}))}(['@keyframes mdc-ripple-fg-radius-in{from{animation-timing-function:cubic-bezier(0.4, 0, 0.2, 1);transform:translate(var(--mdc-ripple-fg-translate-start, 0)) scale(1)}to{transform:translate(var(--mdc-ripple-fg-translate-end, 0)) scale(var(--mdc-ripple-fg-scale, 1))}}@keyframes mdc-ripple-fg-opacity-in{from{animation-timing-function:linear;opacity:0}to{opacity:var(--mdc-ripple-fg-opacity, 0)}}@keyframes mdc-ripple-fg-opacity-out{from{animation-timing-function:linear;opacity:var(--mdc-ripple-fg-opacity, 0)}to{opacity:0}}:host{display:block}.mdc-list{-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;font-family:Roboto, sans-serif;font-family:var(--mdc-typography-subtitle1-font-family, var(--mdc-typography-font-family, Roboto, sans-serif));font-size:1rem;font-size:var(--mdc-typography-subtitle1-font-size, 1rem);line-height:1.75rem;line-height:var(--mdc-typography-subtitle1-line-height, 1.75rem);font-weight:400;font-weight:var(--mdc-typography-subtitle1-font-weight, 400);letter-spacing:0.009375em;letter-spacing:var(--mdc-typography-subtitle1-letter-spacing, 0.009375em);text-decoration:inherit;text-decoration:var(--mdc-typography-subtitle1-text-decoration, inherit);text-transform:inherit;text-transform:var(--mdc-typography-subtitle1-text-transform, inherit);line-height:1.5rem;margin:0;padding:8px 0;list-style-type:none;color:rgba(0, 0, 0, 0.87);color:var(--mdc-theme-text-primary-on-background, rgba(0, 0, 0, 0.87));padding:var(--mdc-list-vertical-padding, 8px) 0}.mdc-list:focus{outline:none}.mdc-list-item{height:48px}.mdc-list--dense{padding-top:4px;padding-bottom:4px;font-size:.812rem}.mdc-list ::slotted([divider]){height:0;margin:0;border:none;border-bottom-width:1px;border-bottom-style:solid;border-bottom-color:rgba(0, 0, 0, 0.12)}.mdc-list ::slotted([divider][padded]){margin:0 var(--mdc-list-side-padding, 16px)}.mdc-list ::slotted([divider][inset]){margin-left:var(--mdc-list-inset-margin, 72px);margin-right:0;width:calc(100% - var(--mdc-list-inset-margin, 
72px))}.mdc-list-group[dir=rtl] .mdc-list ::slotted([divider][inset]),[dir=rtl] .mdc-list-group .mdc-list ::slotted([divider][inset]){margin-left:0;margin-right:var(--mdc-list-inset-margin, 72px)}.mdc-list ::slotted([divider][inset][padded]){width:calc(100% - var(--mdc-list-inset-margin, 72px) - var(--mdc-list-side-padding, 16px))}.mdc-list--dense ::slotted([mwc-list-item]){height:40px}.mdc-list--dense ::slotted([mwc-list]){--mdc-list-item-graphic-size: 20px}.mdc-list--two-line.mdc-list--dense ::slotted([mwc-list-item]),.mdc-list--avatar-list.mdc-list--dense ::slotted([mwc-list-item]){height:60px}.mdc-list--avatar-list.mdc-list--dense ::slotted([mwc-list]){--mdc-list-item-graphic-size: 36px}:host([noninteractive]){pointer-events:none;cursor:default}.mdc-list--dense ::slotted(.mdc-list-item__primary-text){display:block;margin-top:0;line-height:normal;margin-bottom:-20px}.mdc-list--dense ::slotted(.mdc-list-item__primary-text)::before{display:inline-block;width:0;height:24px;content:"";vertical-align:0}.mdc-list--dense ::slotted(.mdc-list-item__primary-text)::after{display:inline-block;width:0;height:20px;content:"";vertical-align:-20px}']);return _t=function(){return t},t}(0,i.gn)([(0,r.Cb)({type:String})],bt.prototype,"emptyMessage",void 0),(0,i.gn)([(0,r.IO)(".mdc-list")],bt.prototype,"mdcRoot",void 0),(0,i.gn)([(0,r.IO)("slot")],bt.prototype,"slotElement",void 0),(0,i.gn)([(0,r.Cb)({type:Boolean}),(0,a.P)((function(t){this.mdcFoundation&&this.mdcFoundation.setUseActivatedClass(t)}))],bt.prototype,"activatable",void 0),(0,i.gn)([(0,r.Cb)({type:Boolean}),(0,a.P)((function(t,e){this.mdcFoundation&&this.mdcFoundation.setMulti(t),void 0!==e&&this.layout()}))],bt.prototype,"multi",void 0),(0,i.gn)([(0,r.Cb)({type:Boolean}),(0,a.P)((function(t){this.mdcFoundation&&this.mdcFoundation.setWrapFocus(t)}))],bt.prototype,"wrapFocus",void 0),(0,i.gn)([(0,r.Cb)({type:String}),(0,a.P)((function(t,e){void 0!==e&&this.updateItems()}))],bt.prototype,"itemRoles",void 0),(0,i.gn)([(0,r.Cb)({type:String})],bt.prototype,"innerRole",void 0),(0,i.gn)([(0,r.Cb)({type:String})],bt.prototype,"innerAriaLabel",void 0),(0,i.gn)([(0,r.Cb)({type:Boolean})],bt.prototype,"rootTabbable",void 0),(0,i.gn)([(0,r.Cb)({type:Boolean,reflect:!0}),(0,a.P)((function(t){var e=this.slotElement;if(t&&e){var n=(0,s.f6)(e,'[tabindex="0"]');this.previousTabindex=n,n&&n.setAttribute("tabindex","-1")}else!t&&this.previousTabindex&&(this.previousTabindex.setAttribute("tabindex","0"),this.previousTabindex=null)}))],bt.prototype,"noninteractive",void 0);var xt=(0,r.iv)(_t());function It(t){return(It="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function St(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}function wt(t,e){return(wt=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t})(t,e)}function Et(t){var e=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],(function(){}))),!0}catch(t){return!1}}();return function(){var n,i=Rt(t);if(e){var r=Rt(this).constructor;n=Reflect.construct(i,arguments,r)}else n=i.apply(this,arguments);return At(this,n)}}function At(t,e){return!e||"object"!==It(e)&&"function"!=typeof e?function(t){if(void 0===t)throw new ReferenceError("this hasn't been 
initialised - super() hasn't been called");return t}(t):e}function Rt(t){return(Rt=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)})(t)}var Tt=function(t){!function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&wt(t,e)}(n,t);var e=Et(n);function n(){return St(this,n),e.apply(this,arguments)}return n}(bt);Tt.styles=xt,Tt=(0,i.gn)([(0,r.Mo)("mwc-list")],Tt)},36220:function(t,e,n){"use strict";function i(t){return(i="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}n.d(e,{gn:function(){return r}});function r(t,e,n,r){var o,a=arguments.length,s=a<3?e:null===r?r=Object.getOwnPropertyDescriptor(e,n):r;if("object"===("undefined"==typeof Reflect?"undefined":i(Reflect))&&"function"==typeof Reflect.decorate)s=Reflect.decorate(t,e,n,r);else for(var c=t.length-1;c>=0;c--)(o=t[c])&&(s=(a<3?o(s):a>3?o(e,n,s):o(e,n))||s);return a>3&&s&&Object.defineProperty(e,n,s),s}Object.create;Object.create}}]);
//# sourceMappingURL=chunk.a6a05e49b5487d486578.js.map
|
PypiClean
|
/timemory-3.3.0rc2.tar.gz/timemory-3.3.0rc2/external/llvm-ompt/libomptarget/README.txt
|
README for the LLVM* OpenMP* Offloading Runtime Library (libomptarget)
======================================================================
How to Build the LLVM* OpenMP* Offloading Runtime Library (libomptarget)
========================================================================
In-tree build:
$ cd where-you-want-to-live
Check out openmp (libomptarget lives under ./libomptarget) into llvm/projects
$ cd where-you-want-to-build
$ mkdir build && cd build
$ cmake path/to/llvm -DCMAKE_C_COMPILER=<C compiler> -DCMAKE_CXX_COMPILER=<C++ compiler>
$ make omptarget
Out-of-tree build:
$ cd where-you-want-to-live
Check out openmp (libomptarget lives under ./libomptarget)
$ cd where-you-want-to-live/openmp/libomptarget
$ mkdir build && cd build
$ cmake path/to/openmp -DCMAKE_C_COMPILER=<C compiler> -DCMAKE_CXX_COMPILER=<C++ compiler>
$ make
For details about building, please look at README.rst in the parent directory.
Architectures Supported
=======================
The current library has only been tested on the Linux operating system, with the
following host architectures:
* Intel(R) 64 architecture
* IBM(R) Power architecture (big endian)
* IBM(R) Power architecture (little endian)
* ARM(R) AArch64 architecture (little endian)
The currently supported offloading device architectures are:
* Intel(R) 64 architecture (generic 64-bit plugin - mostly for testing purposes)
* IBM(R) Power architecture (big endian) (generic 64-bit plugin - mostly for testing purposes)
* IBM(R) Power architecture (little endian) (generic 64-bit plugin - mostly for testing purposes)
* ARM(R) AArch64 architecture (little endian) (generic 64-bit plugin - mostly for testing purposes)
* CUDA(R) enabled 64-bit NVIDIA(R) GPU architectures
Supported RTL Build Configurations
==================================
Supported Architectures: Intel(R) 64, IBM(R) Power 7 and Power 8
              ---------------------------
              |   gcc      |   clang    |
--------------|------------|------------|
| Linux* OS   |   Yes(1)   |   Yes(2)   |
-----------------------------------------
(1) gcc version 4.8.2 or later is supported.
(2) clang version 3.7 or later is supported.
Front-end Compilers that work with this RTL
===========================================
The following compilers are known to do compatible code generation for
this RTL:
- clang (from https://github.com/clang-ykt )
- clang (development branch at http://clang.llvm.org - several features still
under development)
-----------------------------------------------------------------------
Notices
=======
This library and related compiler support is still under development, so the
employed interface is likely to change in the future.
*Other names and brands may be claimed as the property of others.
|
PypiClean
|
/es-fluent-0.0.5.tar.gz/es-fluent-0.0.5/docs/es_fluent.filters.rst
|
Filters
=======
Filters typically have corresponding elasticsearch filters. ESFluent helps
intelligently compose them and provides a common interface, via
:func:`~es_fluent.filters.Filter.to_query` whose responsibility is to
generate the corresponding elasticsearch filter dictionary.
A Filter, such as :class:`~es_fluent.filters.geometry.Geometry`, can also clean up
data and generate novel queries.
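As a rough illustration (a minimal sketch only; the concrete filter class and its
constructor arguments are hypothetical and may differ from the real API), a custom
filter only needs to implement :func:`~es_fluent.filters.Filter.to_query`:

.. code-block:: python

    from es_fluent.filters import Filter

    class StatusFilter(Filter):
        """Hypothetical filter matching documents on a ``status`` field."""

        def __init__(self, status):
            self.status = status

        def to_query(self):
            # to_query returns the elasticsearch filter dictionary
            # corresponding to this filter.
            return {"term": {"status": self.status}}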
Basic Filters
-------------
.. automodule:: es_fluent.filters.core
    :members:
    :undoc-members:
    :inherited-members:
    :show-inheritance:
Geometry Filters
----------------
.. automodule:: es_fluent.filters.geometry
    :members:
    :undoc-members:
    :inherited-members:
    :show-inheritance:
Utilities
---------
.. autoclass:: es_fluent.filters.FilterRegistry
.. autoclass:: es_fluent.filters.Filter
.. autofunction:: es_fluent.filters.build_filter
|
PypiClean
|
/python-graspfile-0.4.1.tar.gz/python-graspfile-0.4.1/src/graspfile/torfile.py
|
from collections import OrderedDict
import graspfile.torparser as torparser
_debug_ = False
# trick for py2/3 compatibility
if 'basestring' not in globals():
basestring = str
"""List of acceptable GraspTorObject types"""
grasp_object_types = [""]
class GraspTorValue:
"""A container for values from GraspTorMember objects"""
def __init__(self, tor_value="_None"):
"""Container for values within GraspTorMember, GraspTorStruct, and GraspTorSequence objects.
Args:
tor_value: a `pyparsing.ParseResults` object from a tor_value token.
"""
#: str: The value as a string
self.value = None
#: str: The unit of the value as a string
self.unit = None
if tor_value != "_None":
self.fill(tor_value)
def __repr__(self):
"""Return a useful string representation of the GraspTorValue object.
Returns:
string representation of GraspTorValue in format suitable for use in a .tor file."""
if self.unit:
return repr(self.value) + " " + self.unit
else:
if isinstance(self.value, basestring):
return self.value
else:
return repr(self.value)
def fill(self, tor_value):
"""Fills the GraspTorValue object from output by the parser.
Args:
tor_value: a `pyparsing.ParseResults` class output by the torparser module.
"""
if _debug_:
print("GraspTorValue.fill received: {:}".format(tor_value))
try:
if isinstance(tor_value, basestring):
self.value = tor_value
else:
if len(tor_value) > 1:
self.value = tor_value[0]
self.unit = tor_value[1]
else:
self.value = tor_value[0]
except TypeError:
if _debug_:
print("TypeError caught for tor_value = {:}".format(tor_value))
self.value = tor_value
class GraspTorMember:
"""A container for the member parameter of an GraspTorObject """
def __init__(self, tor_member=None):
self.name = None
self._type = None
self._value = None
if tor_member:
self.fill(tor_member)
def __repr__(self):
"""Return a useful string representation of the GraspTorMember object."""
return self._value.__repr__()
def fill(self, tor_member):
if _debug_:
print("GraspTorMember.fill received: {:}".format(tor_member))
self.name = tor_member[0]
if len(tor_member) > 2:
self._type = tor_member[1]
else:
self._type = "value"
if self._type == "struct":
self._value = GraspTorStruct(tor_member[1:])
elif self._type == "ref":
self._value = GraspTorRef(tor_member[1:])
elif self._type == "sequence":
self._value = GraspTorSequence(tor_member[1:])
else:
self._value = GraspTorValue(tor_member[1])
@property
def type(self):
"""Return the type of the GraspTorMember"""
return self._type
@property
def value(self):
"""Short circuit through tor_value to supply tV.value if it is a simple value"""
if self._type == "value":
return self._value.value
else:
return self._value
@value.setter
def value(self, new_val):
if self._type == "value":
if isinstance(new_val, GraspTorValue):
self._value = new_val
else:
self._value.value = new_val
else:
self._value = new_val
@property
def unit(self):
"""Short circuit through tor_value to supply tV.unit if appropriate, else return None"""
if self._type == "value":
return self._value.unit
else:
return None
@unit.setter
def unit(self, new_unit):
if self._type == "value":
self._value.unit = new_unit
else:
pass
def __getitem__(self, key):
return self.value[key].value
def __setitem__(self, key, new_value):
self.value[key].value = new_value
class GraspTorRef:
"""A container for a value that is a reference to another GraspTorObject"""
def __init__(self, tor_ref=None):
#: str: Reference to another GraspTorObject
self.ref = None
if tor_ref:
self.fill(tor_ref)
def __repr__(self):
"""Return a useful string representation of the GraspTorRef object."""
return "ref({:s})".format(self.ref)
def fill(self, tor_ref):
if _debug_:
print("GraspTorRef.fill received: {:}".format(tor_ref))
self.ref = tor_ref[1]
class GraspTorSequence(list):
"""A container for a value that is a sequence of GraspTorValues."""
def __init__(self, tor_seq=None):
super(GraspTorSequence, self).__init__()
if tor_seq:
self.fill(tor_seq)
def __repr__(self):
"""Return a useful string representation of the GraspTorSequence object."""
outstring = "sequence("
for v in self:
outstring += (repr(v)) + ","
outstring = outstring[:-1] + ")"
return outstring
def fill(self, tor_seq):
if _debug_:
print("GraspTorSequence.fill received: {:}".format(tor_seq))
for t in tor_seq[1:]:
self.append(GraspTorValue(t))
class GraspTorStruct(OrderedDict):
"""A container for a GraspTorStruct, that has a number of members. Members are
stored as an OrderedDict."""
def __init__(self, tor_struct=None):
OrderedDict.__init__(self)
if tor_struct:
self.fill(tor_struct)
else:
pass
def __repr__(self):
"""Return a useful string representation of the GraspTorSequence object."""
outstring = "struct("
for v in iter(self.keys()):
outstring += v + ": " + repr(self[v]) + ", "
outstring = outstring[:-2] + ")"
return outstring
def fill(self, tor_struct):
"""Fill the GraspTorObject using the pyparsing results"""
if _debug_:
print("GraspTorStruct.fill received: {:}".format(tor_struct))
for t in tor_struct[1:]:
self[t[0]] = GraspTorMember(t)
class GraspTorComment:
"""A container for comments from a GraspTorFile"""
def __init__(self, tor_comment=None):
#: str: Name of comment object
self.name = None
#: str: The type of the object
self._type = "comment"
#: str: Text of the comment object
self.text = None
#: int: Line number that the comment appears in.
self.location = None
if tor_comment:
self.fill(tor_comment)
def __repr__(self):
"""Return the comment as // prefixed lines"""
return "\n".join(self.text)
def fill(self, tor_comment):
if _debug_:
print("GraspTorComment.fill received: {:}".format(tor_comment))
print("with name: {:}".format(tor_comment[0]))
self.name = tor_comment[0]
self.location = int(self.name.lstrip("comment"))
self.text = tor_comment[2]
@property
def type(self):
"""Return the type of the comment object as "commment"."""
return self._type
class GraspTorObject(OrderedDict):
"""A container for a GraspTorObject, that has a name, a type and a number of members. Members are
stored as an OrderedDict."""
def __init__(self, tor_obj=None):
OrderedDict.__init__(self)
self._name = None
self._type = None
if isinstance(tor_obj, str):
self.read_str(tor_obj)
elif isinstance(tor_obj, torparser.pp.ParseResults):
self.fill(tor_obj)
else:
pass
def __repr__(self):
"""Return a useful string representation of the GraspTorObject object."""
if self.type == "comment":
outstring = repr(self["comment"]) + "\n"
else:
memberstrings = []
for k in iter(self.keys()):
memberstrings.append(k + " : " + repr(self[k]))
memberstring = ",\n ".join(memberstrings)
outstring = """{:} {:}
(
{:}
)
""".format(self._name, self._type, memberstring)
return outstring
def read_str(self, tor_str):
"""Read the contents of the string into a tor_object and then process the results"""
res = torparser.tor_object.parseString(tor_str)
self.fill(res)
def fill(self, tor_obj):
"""Fill the GraspTorObject using the pyparsing results"""
if _debug_:
print("GraspTorObject.fill received: {:}".format(tor_obj))
print("Type:", type(tor_obj))
self._name = tor_obj[0]
self._type = tor_obj[1]
for r in tor_obj[2:]:
if self._type == "comment":
self["comment"] = GraspTorComment(tor_obj)
else:
self[r[0]] = GraspTorMember(r)
@property
def name(self):
"""Return the name of the GraspTorObject"""
return self._name
@name.setter
def name(self, new_name):
"""Set the name of the GraspTorObject"""
self._name = new_name
@property
def type(self):
"""Return the type of the GraspTorObject"""
return self._type
@type.setter
def type(self, new_type):
"""Set the type of the GraspTorObject. Checks that type is sane"""
# if new_type in grasp_object_types:
# self._type = new_type
# else:
# raise ValueError("Unknown type for a GraspTorObject")
self._type = new_type
class GraspTorFile(OrderedDict):
"""A container for objects read from a tor file. Subclasses OrderedDict to provide a dict of torObjects
keyed by name, and sorted by insertion order"""
def __init__(self, file_like=None):
"""Create a TorFile object, and if fileLike is specified, read the file"""
OrderedDict.__init__(self)
self._parser = torparser.tor_file
if file_like:
self.read(file_like)
def __repr__(self):
"""Return a GRASP readable string for the GraspTorFile object"""
outstring = ""
for k in iter(self.keys()):
outstring += repr(self[k]) + "\n"
return outstring
def read(self, file_like):
"""Read a list of torObjects and torComments from a fileLike object. We use pyparsing.ParserElement's parseFile
method, which can take either a file-like object or a filename to open. If you wish to parse an existing string
object, use StringIO to supply a file-like object containing the string."""
# Parse the file
res = self._parser.parseFile(file_like)
# Turn the parse results into objects
self.fill(res)
def fill(self, tor_file):
"""Fill the GraspTorFile using the parser results in tor_file"""
for r in tor_file:
temp = GraspTorObject(r)
self[temp.name] = temp
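
# Minimal usage sketch (illustrative only; "example.tor" is a hypothetical file name).
# GraspTorFile.read accepts either a file name or a file-like object, so an
# io.StringIO wrapping a tor-formatted string works as well:
#
#     from graspfile.torfile import GraspTorFile
#
#     tor = GraspTorFile("example.tor")
#     for name, obj in tor.items():
#         print(name, obj.type)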
|
PypiClean
|
/mattermost_api_reference_client-4.0.0.post1.tar.gz/mattermost_api_reference_client-4.0.0.post1/mattermost_api_reference_client/models/data_retention_policy.py
|
from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..types import UNSET, Unset
T = TypeVar("T", bound="DataRetentionPolicy")
@attr.s(auto_attribs=True)
class DataRetentionPolicy:
"""
Attributes:
display_name (Union[Unset, str]): The display name for this retention policy.
post_duration (Union[Unset, int]): The number of days a message will be retained before being deleted by this
policy. If this value is less than 0, the policy has infinite retention (i.e. messages are never deleted).
id (Union[Unset, str]): The ID of this retention policy.
"""
display_name: Union[Unset, str] = UNSET
post_duration: Union[Unset, int] = UNSET
id: Union[Unset, str] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
display_name = self.display_name
post_duration = self.post_duration
id = self.id
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if display_name is not UNSET:
field_dict["display_name"] = display_name
if post_duration is not UNSET:
field_dict["post_duration"] = post_duration
if id is not UNSET:
field_dict["id"] = id
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
display_name = d.pop("display_name", UNSET)
post_duration = d.pop("post_duration", UNSET)
id = d.pop("id", UNSET)
data_retention_policy = cls(
display_name=display_name,
post_duration=post_duration,
id=id,
)
data_retention_policy.additional_properties = d
return data_retention_policy
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
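
# Minimal usage sketch (the values below are illustrative only):
#
#     policy = DataRetentionPolicy.from_dict(
#         {"display_name": "30-day retention", "post_duration": 30, "id": "policy-id"}
#     )
#     assert policy.to_dict()["post_duration"] == 30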
|
PypiClean
|
/pcalf-1.2.1.tar.gz/pcalf-1.2.1/README.md
|
# PCALF - Retrieve Calcyanin protein and ccyA gene from genomes.
PCALF stands for Python CALcyanin Finder.
Benzerara, K., Duprat, E., Bitard-Feildel, T., Caumes, G., Cassier-Chauvat, C., Chauvat, F., ... & Callebaut, I. (2022). [A new gene family diagnostic for intracellular biomineralization of amorphous Ca carbonates by cyanobacteria](https://doi.org/10.1093/gbe/evac026). Genome Biology and Evolution, 14(3), evac026.
<br><br>
# Table of Contents :
- [Calcyanin detection](#Calcyanin-detection-)
- [Decision tree](#Decision-tree-)
- [Installation](#Installation-)
- [Dependencies](#Dependencies-)
- [Usage](#Usage-)
- [pcalf](#pcalf-)
- [pcalf-datasets-workflow](#pcalf-datasets-workflow-)
- [pcalf-annotate-workflow](#pcalf-annotate-workflow-)
- [pcalf-report](#pcalf-report-)
- [Workflow](#Workflow-)
<br><br>
## Calcyanin detection :
The ccyA gene is searched for at the protein level following a simple [decision tree](#Decision-tree-) based on the specific composition of the C-ter extremity of this protein.
We use a weighted HMM profile specific to the glycine zipper triplication (aka GlyX3) to detect sequences with a putative glycine triplication. Sequences with at least one hit against this profile are annotated with three HMM profiles specific to each glycine zipper: Gly1, Gly2 and Gly3.
A set of known calcyanins is used to infer the type of the N-ter extremity (X, Z, Y, CoBaHMA or ?). Finally, a flag is assigned to each sequence depending on
its N-terminus type and its C-terminus modular organization.
<br><br>
## Decision tree :
<!--  -->
<img src="./decision_tree.jpeg" alt="decision_tree" width="100%"/>
## Installation :
```bash
mamba create -n pcalf -c k2sohigh pcalf;
```
or
```bash
pip3 install pcalf;
```
### Dependencies :
```python
python==3.9
pyhmmer==0.7.1
biopython==1.81
numpy>=1.21
pyyaml>=6
snakemake==7.22
pandas==1.5.3
tqdm==4.64.1
plotly==5.11.0
python-igraph==0.10.4
```
### External dependency :
```
blast
```
<br><br>
## Usage :
Pcalf is composed of several commands :
- [pcalf](#pcalf-)
- [pcalf-datasets-workflow](#pcalf-datasets-workflow-)
- [pcalf-annotate-workflow](#pcalf-annotate-workflow-)
- [pcalf-report](#pcalf-report-)
---
### pcalf :
This command can be used to quickly look for the presence of calcyanin in a set of amino acid sequences. It takes one or more fasta files as input and outputs several files, including a summary, a list of features and a list of raw hits produced during the search. It also produces updated HMMs and updated MSAs with newly detected calcyanins tagged as calcyanin with known N-ter.
```
pcalf -i proteins.fasta
-o pcalf_results
--iterative-search
--iterative-update
--gly1-msa custom_gly1_msa.fasta
```
--iterative-search : If set, the search is repeated with the profiles produced by the previous iteration until no new sequence is detected or --max-iteration is reached.
--iterative-update : True calcyanins (if any) will be added to the HMM profiles iteratively, starting with the best sequence (based on feature score).
--gly1-msa : Use another MSA instead of the default one. An HMM profile will be built from the given MSA and the glycine weight will be increased.
#### Thresholds:
By default, coverage and E-value thresholds are inferred from the HMM profiles that will be used for the search. To define the thresholds, an MSA is converted into a simple fasta file by deleting all gaps and aligned against its own HMM profile. A soft filter is applied using a coverage threshold of 0.5. The maximum E-value and the minimum coverage are then used to define the thresholds as follows: `max(E-value)*10, min(coverage)-0.1`
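As a rough illustration only (the variable values and names below are hypothetical and not part of the pcalf API), the threshold computation boils down to:

```python
# Hypothetical (E-value, coverage) pairs obtained by aligning the gap-free MSA
# sequences against their own HMM profile, after a soft coverage filter of 0.5.
hits = [(1e-60, 0.92), (3e-45, 0.81), (2e-50, 0.88)]

evalue_threshold = max(e for e, _ in hits) * 10      # max(E-value) * 10
coverage_threshold = min(c for _, c in hits) - 0.1   # min(coverage) - 0.1
```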
---
### pcalf-datasets-workflow :
pcalf-datasets-workflow can be used to retrieve genomes from NCBI databases such as RefSeq and GenBank based on accession (e.g. GCA_012769535.1) or TaxID.
Genomes and annotations (genes and CDS) will be downloaded using the new command line tools from [NCBI](https://www.ncbi.nlm.nih.gov/datasets/docs/v2/download-and-install/). If annotations do not exist for a genome, then genes and CDS will be predicted using [Prodigal](https://github.com/hyattpd/Prodigal).
A yaml file is also produced and can be used as input for [pcalf-annotate-workflow](#pcalf-annotate-workflow-).
```
GCA_012769535.1:
genome : /path/to/genome.gz
cds_faa: /path/to/cds_faa.gz
cds_fna: /path/to/cds_fna.gz
GCF_012769535.1:
...
```
example :
```
pcalf-datasets-workflow -t 1117
-o pcalf_datasets_results
-a file_with_accession.txt
-e file_with_accession_to_ignore.txt
```
The command above will download all cyanobacteria genomes (taxid: 1117) and the genomes specified in file_with_accession.txt. Any genome listed in file_with_accession_to_ignore.txt will be ignored.
---
### pcalf-annotate-workflow :
pcalf-annotate-workflow is the workflow that recovers calcyanin proteins and ccyA genes from a set of genomes.
This workflow is composed of multiple steps:
- genome taxonomic classification using [GTDB-TK V2](https://github.com/Ecogenomics/GTDBTk)
- genome quality assessment using [CheckM](https://github.com/Ecogenomics/CheckM/)
- calcyanin detection using [pcalf](#pcalf-)
- calcyanin / ccyA linking using a specific python script.
- NCBI metadatas recovery using [NCBI](https://www.ncbi.nlm.nih.gov/datasets/docs/v2/)
Note that GTDB-TK and CheckM require external databases, respectively [GTDB](https://gtdb.ecogenomic.org/downloads) and [CheckM data](https://data.ace.uq.edu.au/public/CheckM_databases). In addition, it is advised to run GTDB-TK and CheckM on a computer cluster. Because pcalf-annotate-workflow relies on Snakemake, you can easily provide a Snakemake profile through the --snakargs option to run it on your favorite cluster. Alternatively, you can skip the genome taxonomic classification and the quality assessment with the --quick flag.
pcalf-annotate-workflow takes as input a yaml file with a specific format; see [pcalf-datasets-workflow](#pcalf-datasets-workflow-) for details.
example :
```
pcalf-annotate-workflow -i input_file.yaml
-o pcalf_annotate_results
--db db_file_from_another_run.sqlite3
--snakargs "--profile my_slurm_profile --use-conda --jobs 50"
--gtdb my_gtdb_directory
--checkm my_checkm_datas_directory
```
The command above will process all the genomes specified in input_file.yaml through the pcalf-annotate-workflow, including the CheckM and GTDB-TK steps. The sqlite3 file produced will be merged with db_file_from_another_run.sqlite3. The workflow will be run on your computer cluster with 50 jobs at a time. See the [snakemake documentation](https://snakemake.readthedocs.io/en/stable/) for details about cluster execution.
Several output files for each step will be produced but the final output is a sqlite3 database storing multiple tables:
```
- genome # NCBI metadatas
- gtdbtk # GTDB-TK classification results
- checkm # Checkm Results
- summary # PCALF summary table
- features # PCALF features table
- hits # PCALF hits table
- ccyA # ccyA table
- gly1 # Gly1 MSA
- gly2 # Gly2 MSA
- gly3 # Gly3 MSA
- glyx3 # Glyx3 MSA
- nterdb # N-ter table
```
You can use the sqlite3 database from another run as a basis for a new one. In this case, MSAs stored in the sqlite3 file will be used to generate HMM profiles for [pcalf](#pcalf-).
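For example, the resulting database can be inspected with Python's built-in sqlite3 module (a minimal sketch; the file name is hypothetical and the column layout is whatever your run produced):

```python
import sqlite3

con = sqlite3.connect("pcalf_annotate_results.sqlite3")  # hypothetical path
tables = [row[0] for row in con.execute("SELECT name FROM sqlite_master WHERE type='table'")]
print(tables)  # e.g. genome, gtdbtk, checkm, summary, features, hits, ccyA, ...
for row in con.execute("SELECT * FROM summary LIMIT 5"):
    print(row)
con.close()
```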
---
### pcalf-report :
This command produces an HTML report from a sqlite3 database produced by [pcalf-annotate-workflow](#pcalf-annotate-workflow-).
example :
```
pcalf-report --db sqlite3_file.sqlite3 --out report.html
```
---
<br><br>
## Workflow :

|
PypiClean
|
/rinoh-typeface-trebuchetms-0.1.0.tar.gz/rinoh-typeface-trebuchetms-0.1.0/README.rst
|
==========================
rinoh-typeface-trebuchetms
==========================
This package provides the `Trebuchet MS`_ 1.22 typeface from Microsoft's `Core
fonts for the Web`_ for use with rinohtype_.
In order to comply with the EULA for these fonts, the font archive is
downloaded during installation of this package.
.. _Trebuchet MS: https://en.wikipedia.org/wiki/Trebuchet_MS
.. _Core fonts for the Web: https://en.wikipedia.org/wiki/Core_fonts_for_the_Web
.. _rinohtype: https://github.com/brechtm/rinohtype#readme
|
PypiClean
|
/lco-banzai-0.19.3.tar.gz/lco-banzai-0.19.3/astropy_helpers/astropy_helpers/extern/automodapi/smart_resolver.py
|
from docutils.nodes import literal, reference
def process_docstring(app, what, name, obj, options, lines):
if isinstance(obj, type):
env = app.env
if not hasattr(env, 'class_name_mapping'):
env.class_name_mapping = {}
mapping = env.class_name_mapping
mapping[obj.__module__ + '.' + obj.__name__] = name
def missing_reference_handler(app, env, node, contnode):
if not hasattr(env, 'class_name_mapping'):
env.class_name_mapping = {}
mapping = env.class_name_mapping
reftype = node['reftype']
reftarget = node['reftarget']
if reftype in ('obj', 'class', 'exc', 'meth'):
reftarget = node['reftarget']
suffix = ''
if reftarget not in mapping:
if '.' in reftarget:
front, suffix = reftarget.rsplit('.', 1)
else:
suffix = reftarget
if suffix.startswith('_') and not suffix.startswith('__'):
# If this is a reference to a hidden class or method,
# we can't link to it, but we don't want to have a
# nitpick warning.
return node[0].deepcopy()
if reftype in ('obj', 'meth') and '.' in reftarget:
if front in mapping:
reftarget = front
suffix = '.' + suffix
if (reftype in ('class', ) and '.' in reftarget and
reftarget not in mapping):
if '.' in front:
reftarget, _ = front.rsplit('.', 1)
suffix = '.' + suffix
reftarget = reftarget + suffix
prefix = reftarget.rsplit('.')[0]
inventory = env.intersphinx_named_inventory
if (reftarget not in mapping and
prefix in inventory):
if reftarget in inventory[prefix]['py:class']:
newtarget = inventory[prefix]['py:class'][reftarget][2]
if not node['refexplicit'] and \
'~' not in node.rawsource:
contnode = literal(text=reftarget)
newnode = reference('', '', internal=True)
newnode['reftitle'] = reftarget
newnode['refuri'] = newtarget
newnode.append(contnode)
return newnode
if reftarget in mapping:
newtarget = mapping[reftarget] + suffix
if not node['refexplicit'] and '~' not in node.rawsource:
contnode = literal(text=newtarget)
newnode = env.domains['py'].resolve_xref(
env, node['refdoc'], app.builder, 'class', newtarget,
node, contnode)
if newnode is not None:
newnode['reftitle'] = reftarget
return newnode
def setup(app):
app.connect('autodoc-process-docstring', process_docstring)
app.connect('missing-reference', missing_reference_handler)
|
PypiClean
|
/call_recorder_api-1.0.1-py3-none-any.whl/call_recorder_api/models/device_type.py
|
import pprint
import re # noqa: F401
import six
class DeviceType(object):
"""NOTE:
Do not edit the class manually.
"""
"""
allowed enum values
"""
ANDROID = "android"
IOS = "ios"
MAC = "mac"
WINDOWS = "windows"
WEB = "web"
CUSTOM = "custom"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""DeviceType - a model defined in spec""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DeviceType, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeviceType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
PypiClean
|
/peng-1.0.11.zip/peng-1.0.11/pypkg/term_echo.py
|
import os
import platform
import subprocess
import sys
###
# Functions
###
def ste(command, nindent, mdir, fpointer):
r"""
Echo terminal output.
Print STDOUT resulting from a given Bash shell command (relative to the
package :code:`pypkg` directory) formatted in reStructuredText
:param command: Bash shell command, relative to
:bash:`${PMISC_DIR}/pypkg`
:type command: string
:param nindent: Indentation level
:type nindent: integer
:param mdir: Module directory
:type mdir: string
:param fpointer: Output function pointer. Normally is :code:`cog.out` but
:code:`print` or other functions can be used for
debugging
:type fpointer: function object
For example::
.. This is a reStructuredText file snippet
.. [[[cog
.. import os, sys
.. from docs.support.term_echo import term_echo
.. file_name = sys.modules['docs.support.term_echo'].__file__
.. mdir = os.path.realpath(
.. os.path.dirname(
.. os.path.dirname(os.path.dirname(file_name))
.. )
.. )
.. [[[cog ste('build_docs.py -h', 0, mdir, cog.out) ]]]
.. code-block:: bash
$ ${PMISC_DIR}/pypkg/build_docs.py -h
usage: build_docs.py [-h] [-d DIRECTORY] [-n NUM_CPUS]
...
.. ]]]
"""
term_echo(
"${{PMISC_DIR}}{sep}pypkg{sep}{cmd}".format(sep=os.path.sep, cmd=command),
nindent,
{"PMISC_DIR": mdir},
fpointer,
)
def term_echo(command, nindent=0, env=None, fpointer=None, cols=60):
"""
Print STDOUT resulting from a Bash shell command formatted in reStructuredText.
:param command: Bash shell command
:type command: string
:param nindent: Indentation level
:type nindent: integer
:param env: Environment variable replacement dictionary. The Bash
command is pre-processed and any environment variable
represented in the full notation (:bash:`${...}`) is replaced.
The dictionary key is the environment variable name and the
dictionary value is the replacement value. For example, if
**command** is :code:`'${PYTHON_CMD} -m "x=5"'` and **env**
is :code:`{'PYTHON_CMD':'python3'}` the actual command issued
is :code:`'python3 -m "x=5"'`
:type env: dictionary
:param fpointer: Output function pointer. Normally is :code:`cog.out` but
:code:`print` or other functions can be used for
debugging
:type fpointer: function object
:param cols: Number of columns of output
:type cols: integer
"""
# pylint: disable=R0204
# Set argparse width so that output does not need horizontal scroll
# bar in narrow windows or displays
os.environ["COLUMNS"] = str(cols)
command_int = command
if env:
for var, repl in env.items():
command_int = command_int.replace("${" + var + "}", repl)
tokens = command_int.split(" ")
# Add Python interpreter executable for Python scripts on Windows since
# the shebang does not work
if (platform.system().lower() == "windows") and (tokens[0].endswith(".py")):
tokens = [sys.executable] + tokens
proc = subprocess.Popen(tokens, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = proc.communicate()[0]
if sys.hexversion >= 0x03000000:
stdout = stdout.decode("utf-8")
stdout = stdout.split("\n")
indent = nindent * " "
fpointer("\n", dedent=False)
fpointer("{0}.. code-block:: bash\n".format(indent), dedent=False)
fpointer("\n", dedent=False)
fpointer("{0} $ {1}\n".format(indent, command), dedent=False)
for line in stdout:
if line.strip():
fpointer(indent + " " + line.replace("\t", " ") + "\n", dedent=False)
else:
fpointer("\n", dedent=False)
fpointer("\n", dedent=False)
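
# Minimal usage sketch (illustrative only). The output callback is called with a
# ``dedent`` keyword argument, so the sketch wraps sys.stdout.write in a lambda
# that accepts it:
#
#     import sys
#     term_echo(
#         "${PYTHON} --version",
#         nindent=0,
#         env={"PYTHON": sys.executable},
#         fpointer=lambda text, dedent=False: sys.stdout.write(text),
#     )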
|
PypiClean
|
/one_interfaces-4.7.1.tar.gz/one_interfaces-4.7.1/one_interfaces/point3d_pb2.py
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='point3d.proto',
package='',
syntax='proto3',
serialized_options=_b('\252\002\021ONE.Models.CSharp'),
serialized_pb=_b('\n\rpoint3d.proto\"*\n\x07Point3D\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\x42\x14\xaa\x02\x11ONE.Models.CSharpb\x06proto3')
)
_POINT3D = _descriptor.Descriptor(
name='Point3D',
full_name='Point3D',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='Point3D.x', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='y', full_name='Point3D.y', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='z', full_name='Point3D.z', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=17,
serialized_end=59,
)
DESCRIPTOR.message_types_by_name['Point3D'] = _POINT3D
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Point3D = _reflection.GeneratedProtocolMessageType('Point3D', (_message.Message,), {
'DESCRIPTOR' : _POINT3D,
'__module__' : 'point3d_pb2'
# @@protoc_insertion_point(class_scope:Point3D)
})
_sym_db.RegisterMessage(Point3D)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
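# Hypothetical usage sketch (not part of the generated code): round-trip a
# Point3D message through its wire format.
if __name__ == '__main__':
    p = Point3D(x=1.0, y=2.0, z=3.0)
    payload = p.SerializeToString()
    q = Point3D()
    q.ParseFromString(payload)
    assert (q.x, q.y, q.z) == (1.0, 2.0, 3.0)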
|
PypiClean
|
/elastic-apm-6.18.0.tar.gz/elastic-apm-6.18.0/elasticapm/contrib/aiohttp/middleware.py
|
import aiohttp
from aiohttp.web import HTTPException, middleware
import elasticapm
from elasticapm import get_client
from elasticapm.conf import constants
from elasticapm.contrib.aiohttp.utils import get_data_from_request, get_data_from_response
from elasticapm.utils.disttracing import TraceParent
class AioHttpTraceParent(TraceParent):
@classmethod
def merge_duplicate_headers(cls, headers, key):
return ",".join(headers.getall(key, [])) or None
def tracing_middleware(app, client=None):
async def handle_request(request, handler):
elasticapm_client = get_client() if client is None else client
should_trace = elasticapm_client and not elasticapm_client.should_ignore_url(request.path)
if should_trace:
trace_parent = AioHttpTraceParent.from_headers(request.headers)
elasticapm_client.begin_transaction("request", trace_parent=trace_parent)
resource = request.match_info.route.resource
name = request.method
if resource:
# canonical has been added in 3.3, and returns one of path, formatter, prefix
for attr in ("canonical", "_path", "_formatter", "_prefix"):
if hasattr(resource, attr):
name += " " + getattr(resource, attr)
break
else:
name += " unknown route"
else:
name += " unknown route"
elasticapm.set_transaction_name(name, override=False)
elasticapm.set_context(
lambda: get_data_from_request(request, elasticapm_client.config, constants.TRANSACTION), "request"
)
try:
response = await handler(request)
if should_trace:
elasticapm.set_transaction_result("HTTP {}xx".format(response.status // 100), override=False)
elasticapm.set_transaction_outcome(http_status_code=response.status, override=False)
elasticapm.set_context(
lambda: get_data_from_response(response, elasticapm_client.config, constants.TRANSACTION),
"response",
)
return response
except HTTPException as exc:
# HTTPExceptions are response-like: they carry headers and a status code. A status below 500
# represents a regular HTTP response rather than a failure, so it should not be captured as an
# exception (e.g. HTTPOk can be raised, and capturing it would wrongly tag it as an APM error).
# Handle it according to its status instead.
if exc.status_code < 500 and not should_trace:
raise
if elasticapm_client:
elasticapm.set_transaction_result("HTTP {}xx".format(exc.status_code // 100), override=False)
elasticapm.set_transaction_outcome(http_status_code=exc.status_code, override=False)
elasticapm.set_context(
lambda: get_data_from_response(
exc, # noqa: F821
elasticapm_client.config,
constants.ERROR if exc.status_code >= 500 else constants.TRANSACTION, # noqa: F821
),
"response",
)
if exc.status_code >= 500:
elasticapm_client.capture_exception(
context={"request": get_data_from_request(request, elasticapm_client.config, constants.ERROR)}
)
raise
except Exception:
if elasticapm_client:
elasticapm.set_transaction_result("HTTP 5xx", override=False)
elasticapm.set_transaction_outcome(http_status_code=500, override=False)
elasticapm.set_context({"status_code": 500}, "response")
elasticapm_client.capture_exception(
context={"request": get_data_from_request(request, elasticapm_client.config, constants.ERROR)}
)
raise
finally:
elasticapm_client.end_transaction()
# decorating with @middleware is only required in aiohttp < 4.0, and we only support 3+
if aiohttp.__version__.startswith("3"):
return middleware(handle_request)
return handle_request
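# Hypothetical wiring sketch (not part of this module). The supported path is
# the elasticapm aiohttp contrib integration; this only shows how the factory
# above could be attached to an application by hand.
if __name__ == "__main__":
    from aiohttp import web

    async def index(request):
        return web.Response(text="ok")

    app = web.Application()
    app.middlewares.append(tracing_middleware(app))
    app.router.add_get("/", index)
    # web.run_app(app)  # start serving; intentionally left commented in this sketch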
|
PypiClean
|
/sealea-1.4.tar.gz/sealea-1.4/instagrapi/mixins/highlight.py
|
import json
import random
import time
from pathlib import Path
from typing import Dict, List
from urllib.parse import urlparse
from instagrapi import config
from instagrapi.exceptions import HighlightNotFound
from instagrapi.extractors import extract_highlight_v1
from instagrapi.types import Highlight
from instagrapi.utils import dumps
class HighlightMixin:
def highlight_pk_from_url(self, url: str) -> str:
"""
Get Highlight PK from URL
Parameters
----------
url: str
URL of highlight
Returns
-------
str
Highlight PK
Examples
--------
https://www.instagram.com/stories/highlights/17895485201104054/ -> 17895485201104054
"""
assert '/highlights/' in url, 'URL must contain "/highlights/"'
path = urlparse(url).path
parts = [p for p in path.split("/") if p and p.isdigit()]
return str(parts[0])
def user_highlights_v1(self, user_id: int, amount: int = 0) -> List[Highlight]:
"""
Get a user's highlights
Parameters
----------
user_id: int
amount: int, optional
Maximum number of highlights to return; default is 0 (all highlights)
Returns
-------
List[Highlight]
A list of Highlight objects
"""
amount = int(amount)
user_id = int(user_id)
params = {
"supported_capabilities_new": json.dumps(config.SUPPORTED_CAPABILITIES),
"phone_id": self.phone_id,
"battery_level": random.randint(25, 100),
"panavision_mode": "",
"is_charging": random.randint(0, 1),
"is_dark_mode": random.randint(0, 1),
"will_sound_on": random.randint(0, 1),
}
result = self.private_request(f"highlights/{user_id}/highlights_tray/", params=params)
return [
extract_highlight_v1(highlight)
for highlight in result.get("tray", [])
]
def user_highlights(self, user_id: int, amount: int = 0) -> List[Highlight]:
"""
Get a user's highlights
Parameters
----------
user_id: int
amount: int, optional
Maximum number of highlights to return; default is 0 (all highlights)
Returns
-------
List[Highlight]
A list of Highlight objects
"""
return self.user_highlights_v1(user_id, amount)
def highlight_info_v1(self, highlight_pk: str) -> Highlight:
"""
Get Highlight by pk or id (by Private Mobile API)
Parameters
----------
highlight_pk: str
Unique identifier of Highlight
Returns
-------
Highlight
An object of Highlight type
"""
highlight_id = f"highlight:{highlight_pk}"
data = {
"exclude_media_ids": "[]",
"supported_capabilities_new": json.dumps(config.SUPPORTED_CAPABILITIES),
"source": "profile",
"_uid": str(self.user_id),
"_uuid": self.uuid,
"user_ids": [highlight_id]
}
result = self.private_request('feed/reels_media/', data)
data = result['reels']
if highlight_id not in data:
raise HighlightNotFound(highlight_pk=highlight_pk, **data)
return extract_highlight_v1(data[highlight_id])
def highlight_info(self, highlight_pk: str) -> Highlight:
"""
Get Highlight by pk or id
Parameters
----------
highlight_pk: str
Unique identifier of Highlight
Returns
-------
Highlight
An object of Highlight type
"""
return self.highlight_info_v1(highlight_pk)
def highlight_create(self, title: str, story_ids: List[str], cover_story_id: str = "", crop_rect: List[float] = [0.0, 0.21830457, 1.0, 0.78094524]) -> Highlight:
"""
Create highlight
Parameters
----------
title: str
Title
story_ids: List[str]
List of story ids
cover_story_id: str
User story as cover, default is first of story_ids
Returns
-------
Highlight
An object of Highlight type
"""
if not cover_story_id:
cover_story_id = story_ids[0]
data = {
"supported_capabilities_new": json.dumps(config.SUPPORTED_CAPABILITIES),
"source": "self_profile",
"creation_id": str(int(time.time())),
"_uid": str(self.user_id),
"_uuid": self.uuid,
"cover": dumps({
"media_id": self.media_id(cover_story_id),
"crop_rect": dumps(crop_rect)
}),
"title": title,
"media_ids": dumps([self.media_id(sid) for sid in story_ids])
}
result = self.private_request("highlights/create_reel/", data=data)
return extract_highlight_v1(result['reel'])
def highlight_edit(self, highlight_pk: str, title: str = "", cover: Dict = {}, added_media_ids: List[str] = [], removed_media_ids: List[str] = []):
data = {
"supported_capabilities_new": json.dumps(config.SUPPORTED_CAPABILITIES),
"source": "self_profile",
"_uid": str(self.user_id),
"_uuid": self.uuid,
"added_media_ids": dumps(added_media_ids),
"removed_media_ids": dumps(removed_media_ids)
}
if title:
data["title"] = title
if cover:
data["cover"] = dumps(cover)
result = self.private_request(f"highlights/highlight:{highlight_pk}/edit_reel/", data=data)
return extract_highlight_v1(result['reel'])
def highlight_change_title(self, highlight_pk: str, title: str) -> Highlight:
"""
Change title for highlight
Parameters
----------
highlight_pk: str
Unique identifier of Highlight
title: str
Title of Highlight
Returns
-------
Highlight
"""
return self.highlight_edit(highlight_pk, title=title)
def highlight_change_cover(self, highlight_pk: str, cover_path: Path) -> Highlight:
"""
Change cover for highlight
Parameters
----------
highlight_pk: str
Unique identifier of Highlight
cover_path: Path
Path to photo
Returns
-------
Highlight
"""
upload_id, width, height = self.photo_rupload(Path(cover_path))
cover = {"upload_id": str(upload_id), "crop_rect": "[0.0,0.0,1.0,1.0]"}
return self.highlight_edit(highlight_pk, cover=cover)
def highlight_add_stories(self, highlight_pk: str, added_media_ids: List[str]) -> Highlight:
"""
Add stories to highlight
Parameters
----------
highlight_pk: str
Unique identifier of Highlight
added_media_ids: List[str]
Stories to add to the highlight
Returns
-------
Highlight
"""
return self.highlight_edit(highlight_pk, added_media_ids=added_media_ids)
def highlight_remove_stories(self, highlight_pk: str, removed_media_ids: List[str]) -> Highlight:
"""
Remove stories from highlight
Parameters
----------
highlight_pk: str
Unique identifier of Highlight
removed_media_ids: List[str]
Stories to remove from the highlight
Returns
-------
Highlight
"""
return self.highlight_edit(highlight_pk, removed_media_ids=removed_media_ids)
def highlight_delete(self, highlight_pk: str) -> bool:
"""
Delete highlight
Parameters
----------
highlight_pk: str
Unique identifier of Highlight
Returns
-------
bool
"""
data = {"_uid": str(self.user_id), "_uuid": self.uuid}
result = self.private_request(f"highlights/highlight:{highlight_pk}/delete_reel/", data=data)
return result.get("status") == "ok"
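# Hypothetical usage sketch (assumes an instagrapi-style Client, which mixes in
# HighlightMixin; the private-API methods above additionally require a login):
if __name__ == "__main__":
    from instagrapi import Client

    cl = Client()
    # cl.login("username", "password")  # credentials intentionally omitted
    pk = cl.highlight_pk_from_url(
        "https://www.instagram.com/stories/highlights/17895485201104054/"
    )
    print(pk)  # -> "17895485201104054"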
|
PypiClean
|
/apache-beam-li-2.38.7.0.zip/apache-beam-li-2.38.7.0/apache_beam/runners/portability/portable_runner.py
|
# pytype: skip-file
# mypy: check-untyped-defs
import atexit
import functools
import itertools
import logging
import threading
import time
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Iterator
from typing import Optional
from typing import Tuple
import grpc
from apache_beam.metrics import metric
from apache_beam.metrics.execution import MetricResult
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.value_provider import ValueProvider
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.runners import runner
from apache_beam.runners.job import utils as job_utils
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import job_server
from apache_beam.runners.portability import portable_metrics
from apache_beam.runners.portability.fn_api_runner.fn_runner import translations
from apache_beam.runners.worker import sdk_worker_main
from apache_beam.runners.worker import worker_pool_main
from apache_beam.transforms import environments
if TYPE_CHECKING:
from google.protobuf import struct_pb2 # pylint: disable=ungrouped-imports
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.pipeline import Pipeline
from apache_beam.portability.api import beam_runner_api_pb2
__all__ = ['PortableRunner']
MESSAGE_LOG_LEVELS = {
beam_job_api_pb2.JobMessage.MESSAGE_IMPORTANCE_UNSPECIFIED: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DETAILED: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING: logging.WARNING,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR: logging.ERROR,
}
TERMINAL_STATES = [
beam_job_api_pb2.JobState.DONE,
beam_job_api_pb2.JobState.DRAINED,
beam_job_api_pb2.JobState.FAILED,
beam_job_api_pb2.JobState.CANCELLED,
]
ENV_TYPE_ALIASES = {'LOOPBACK': 'EXTERNAL'}
_LOGGER = logging.getLogger(__name__)
class JobServiceHandle(object):
"""
Encapsulates the interactions necessary to submit a pipeline to a job service.
The base set of interactions consists of 3 steps:
- prepare
- stage
- run
"""
def __init__(self, job_service, options, retain_unknown_options=False):
self.job_service = job_service
self.options = options
self.timeout = options.view_as(PortableOptions).job_server_timeout
self.artifact_endpoint = options.view_as(PortableOptions).artifact_endpoint
self._retain_unknown_options = retain_unknown_options
def submit(self, proto_pipeline):
# type: (beam_runner_api_pb2.Pipeline) -> Tuple[str, Iterator[beam_job_api_pb2.JobStateEvent], Iterator[beam_job_api_pb2.JobMessagesResponse]]
"""
Submit and run the pipeline defined by `proto_pipeline`.
"""
prepare_response = self.prepare(proto_pipeline)
artifact_endpoint = (
self.artifact_endpoint or
prepare_response.artifact_staging_endpoint.url)
self.stage(
proto_pipeline,
artifact_endpoint,
prepare_response.staging_session_token)
return self.run(prepare_response.preparation_id)
def get_pipeline_options(self):
# type: () -> struct_pb2.Struct
"""
Get `self.options` as a protobuf Struct
"""
# fetch runner options from job service
# retries in case the channel is not ready
def send_options_request(max_retries=5):
num_retries = 0
while True:
try:
# The channel may report READY while connections still fail.
# This seems to be an issue only on Mac with port forwarding.
return self.job_service.DescribePipelineOptions(
beam_job_api_pb2.DescribePipelineOptionsRequest(),
timeout=self.timeout)
except grpc.FutureTimeoutError:
# no retry for timeout errors
raise
except grpc.RpcError as e:
num_retries += 1
if num_retries > max_retries:
raise e
time.sleep(1)
options_response = send_options_request()
def add_runner_options(parser):
for option in options_response.options:
try:
# no default values - we don't want runner options
# added unless they were specified by the user
add_arg_args = {'action': 'store', 'help': option.description}
if option.type == beam_job_api_pb2.PipelineOptionType.BOOLEAN:
add_arg_args['action'] = 'store_true' \
if option.default_value != 'true' else 'store_false'
elif option.type == beam_job_api_pb2.PipelineOptionType.INTEGER:
add_arg_args['type'] = int
elif option.type == beam_job_api_pb2.PipelineOptionType.ARRAY:
add_arg_args['action'] = 'append'
parser.add_argument("--%s" % option.name, **add_arg_args)
except Exception as e:
# ignore runner options that are already present
# only in this case is duplicate not treated as error
if 'conflicting option string' not in str(e):
raise
_LOGGER.debug("Runner option '%s' was already added" % option.name)
all_options = self.options.get_all_options(
add_extra_args_fn=add_runner_options,
retain_unknown_options=self._retain_unknown_options)
return self.encode_pipeline_options(all_options)
@staticmethod
def encode_pipeline_options(
all_options: Dict[str, Any]) -> 'struct_pb2.Struct':
def convert_pipeline_option_value(v):
# convert int values: BEAM-5509
if type(v) == int:
return str(v)
elif isinstance(v, ValueProvider):
return convert_pipeline_option_value(
v.get()) if v.is_accessible() else None
return v
# TODO: Define URNs for options.
p_options = {
'beam:option:' + k + ':v1': convert_pipeline_option_value(v)
for k,
v in all_options.items() if v is not None
}
return job_utils.dict_to_struct(p_options)
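# Illustrative example (option names are hypothetical): an options dict such as
# {'job_name': 'wordcount', 'num_workers': 2} becomes a Struct with keys
# 'beam:option:job_name:v1' -> 'wordcount' and
# 'beam:option:num_workers:v1' -> '2' (integers are stringified, see BEAM-5509).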
def prepare(self, proto_pipeline):
# type: (beam_runner_api_pb2.Pipeline) -> beam_job_api_pb2.PrepareJobResponse
"""Prepare the job on the job service"""
return self.job_service.Prepare(
beam_job_api_pb2.PrepareJobRequest(
job_name='job',
pipeline=proto_pipeline,
pipeline_options=self.get_pipeline_options()),
timeout=self.timeout)
def stage(self,
proto_pipeline, # type: beam_runner_api_pb2.Pipeline
artifact_staging_endpoint,
staging_session_token
):
# type: (...) -> None
"""Stage artifacts"""
if artifact_staging_endpoint:
artifact_service.offer_artifacts(
beam_artifact_api_pb2_grpc.ArtifactStagingServiceStub(
channel=grpc.insecure_channel(artifact_staging_endpoint)),
artifact_service.ArtifactRetrievalService(
artifact_service.BeamFilesystemHandler(None).file_reader),
staging_session_token)
def run(self, preparation_id):
# type: (str) -> Tuple[str, Iterator[beam_job_api_pb2.JobStateEvent], Iterator[beam_job_api_pb2.JobMessagesResponse]]
"""Run the job"""
try:
state_stream = self.job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(job_id=preparation_id),
timeout=self.timeout)
# If there's an error, we don't always get it until we try to read.
# Fortunately, there's always an immediate current state published.
state_stream = itertools.chain([next(state_stream)], state_stream)
message_stream = self.job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(job_id=preparation_id),
timeout=self.timeout)
except Exception:
# TODO(BEAM-6442): Unify preparation_id and job_id for all runners.
state_stream = message_stream = None
# Run the job and wait for a result. We don't set a timeout here because
# it may take a long time for a job to complete, and streaming
# jobs currently never return a response.
run_response = self.job_service.Run(
beam_job_api_pb2.RunJobRequest(preparation_id=preparation_id))
if state_stream is None:
state_stream = self.job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(job_id=run_response.job_id))
message_stream = self.job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(job_id=run_response.job_id))
return run_response.job_id, message_stream, state_stream
class PortableRunner(runner.PipelineRunner):
"""
Experimental: No backward compatibility guaranteed.
A BeamRunner that executes Python pipelines via the Beam Job API.
This runner is a stub and does not run the actual job.
This runner schedules the job on a job service. The responsibility of
running and managing the job lies with the job service used.
"""
def __init__(self):
self._dockerized_job_server = None # type: Optional[job_server.JobServer]
@staticmethod
def _create_environment(options):
# type: (PipelineOptions) -> environments.Environment
portable_options = options.view_as(PortableOptions)
# Do not set a Runner. Otherwise this can cause problems in Java's
# PipelineOptions, i.e. ClassNotFoundException, if the corresponding Runner
# does not exist in the Java SDK. In portability, the entry point is clearly
# defined via the JobService.
portable_options.view_as(StandardOptions).runner = None
environment_type = portable_options.environment_type
if not environment_type:
environment_urn = common_urns.environments.DOCKER.urn
elif environment_type.startswith('beam:env:'):
environment_urn = environment_type
else:
# e.g. handle LOOPBACK -> EXTERNAL
environment_type = ENV_TYPE_ALIASES.get(
environment_type, environment_type)
try:
environment_urn = getattr(
common_urns.environments, environment_type).urn
except AttributeError:
raise ValueError('Unknown environment type: %s' % environment_type)
env_class = environments.Environment.get_env_cls_from_urn(environment_urn)
return env_class.from_options(portable_options)
def default_job_server(self, options):
raise NotImplementedError(
'You must specify a --job_endpoint when using --runner=PortableRunner. '
'Alternatively, you may specify which portable runner you intend to '
'use, such as --runner=FlinkRunner or --runner=SparkRunner.')
def create_job_service_handle(self, job_service, options):
# type: (...) -> JobServiceHandle
return JobServiceHandle(job_service, options)
def create_job_service(self, options):
# type: (PipelineOptions) -> JobServiceHandle
"""
Start the job service and return a `JobServiceHandle`
"""
job_endpoint = options.view_as(PortableOptions).job_endpoint
if job_endpoint:
if job_endpoint == 'embed':
server = job_server.EmbeddedJobServer() # type: job_server.JobServer
else:
job_server_timeout = options.view_as(PortableOptions).job_server_timeout
server = job_server.ExternalJobServer(job_endpoint, job_server_timeout)
else:
server = self.default_job_server(options)
return self.create_job_service_handle(server.start(), options)
@staticmethod
def get_proto_pipeline(pipeline, options):
# type: (Pipeline, PipelineOptions) -> beam_runner_api_pb2.Pipeline
portable_options = options.view_as(PortableOptions)
proto_pipeline = pipeline.to_runner_api(
default_environment=PortableRunner._create_environment(
portable_options))
# TODO: https://issues.apache.org/jira/browse/BEAM-7199
# Eventually remove the 'pre_optimize' option altogether and only perform
# the equivalent of the 'default' case below (minus the 'lift_combiners'
# part).
pre_optimize = options.view_as(DebugOptions).lookup_experiment(
'pre_optimize', 'default').lower()
if (not options.view_as(StandardOptions).streaming and
pre_optimize != 'none'):
if pre_optimize == 'default':
phases = [
# TODO: https://issues.apache.org/jira/browse/BEAM-4678
# https://issues.apache.org/jira/browse/BEAM-11478
# Eventually remove the 'lift_combiners' phase from 'default'.
translations.pack_combiners,
translations.lift_combiners,
translations.sort_stages
]
partial = True
elif pre_optimize == 'all':
phases = [
translations.annotate_downstream_side_inputs,
translations.annotate_stateful_dofns_as_roots,
translations.fix_side_input_pcoll_coders,
translations.pack_combiners,
translations.lift_combiners,
translations.expand_sdf,
translations.fix_flatten_coders,
# translations.sink_flattens,
translations.greedily_fuse,
translations.read_to_impulse,
translations.extract_impulse_stages,
translations.remove_data_plane_ops,
translations.sort_stages
]
partial = False
elif pre_optimize == 'all_except_fusion':
# TODO(BEAM-7248): Delete this branch after PortableRunner supports
# beam:runner:executable_stage:v1.
phases = [
translations.annotate_downstream_side_inputs,
translations.annotate_stateful_dofns_as_roots,
translations.fix_side_input_pcoll_coders,
translations.pack_combiners,
translations.lift_combiners,
translations.expand_sdf,
translations.fix_flatten_coders,
# translations.sink_flattens,
# translations.greedily_fuse,
translations.read_to_impulse,
translations.extract_impulse_stages,
translations.remove_data_plane_ops,
translations.sort_stages
]
partial = True
else:
phases = []
for phase_name in pre_optimize.split(','):
# For now, these are all we allow.
if phase_name in ('pack_combiners', 'lift_combiners'):
phases.append(getattr(translations, phase_name))
else:
raise ValueError(
'Unknown or inapplicable phase for pre_optimize: %s' %
phase_name)
phases.append(translations.sort_stages)
partial = True
# All (known) portable runners (i.e. Flink and Spark) support these URNs.
known_urns = frozenset([
common_urns.composites.RESHUFFLE.urn,
common_urns.primitives.IMPULSE.urn,
common_urns.primitives.FLATTEN.urn,
common_urns.primitives.GROUP_BY_KEY.urn
])
proto_pipeline = translations.optimize_pipeline(
proto_pipeline,
phases=phases,
known_runner_urns=known_urns,
partial=partial)
return proto_pipeline
def run_pipeline(self, pipeline, options):
# type: (Pipeline, PipelineOptions) -> PipelineResult
portable_options = options.view_as(PortableOptions)
# TODO: https://issues.apache.org/jira/browse/BEAM-5525
# portable runner specific default
if options.view_as(SetupOptions).sdk_location == 'default':
options.view_as(SetupOptions).sdk_location = 'container'
experiments = options.view_as(DebugOptions).experiments or []
# This is needed as we start a worker server if one is requested
# but none is provided.
if portable_options.environment_type == 'LOOPBACK':
use_loopback_process_worker = options.view_as(
DebugOptions).lookup_experiment('use_loopback_process_worker', False)
portable_options.environment_config, server = (
worker_pool_main.BeamFnExternalWorkerPoolServicer.start(
state_cache_size=
sdk_worker_main._get_state_cache_size(experiments),
data_buffer_time_limit_ms=
sdk_worker_main._get_data_buffer_time_limit_ms(experiments),
use_process=use_loopback_process_worker))
cleanup_callbacks = [functools.partial(server.stop, 1)]
else:
cleanup_callbacks = []
proto_pipeline = self.get_proto_pipeline(pipeline, options)
job_service_handle = self.create_job_service(options)
job_id, message_stream, state_stream = \
job_service_handle.submit(proto_pipeline)
result = PipelineResult(
job_service_handle.job_service,
job_id,
message_stream,
state_stream,
cleanup_callbacks)
if cleanup_callbacks:
# Register an exit handler to ensure cleanup on exit.
atexit.register(functools.partial(result._cleanup, on_exit=True))
_LOGGER.info(
'Environment "%s" has started a component necessary for the '
'execution. Be sure to run the pipeline using\n'
' with Pipeline() as p:\n'
' p.apply(..)\n'
'This ensures that the pipeline finishes before this program exits.',
portable_options.environment_type)
return result
class PortableMetrics(metric.MetricResults):
def __init__(self, job_metrics_response):
metrics = job_metrics_response.metrics
self.attempted = portable_metrics.from_monitoring_infos(metrics.attempted)
self.committed = portable_metrics.from_monitoring_infos(metrics.committed)
@staticmethod
def _combine(committed, attempted, filter):
all_keys = set(committed.keys()) | set(attempted.keys())
return [
MetricResult(key, committed.get(key), attempted.get(key))
for key in all_keys if metric.MetricResults.matches(filter, key)
]
def query(self, filter=None):
counters, distributions, gauges = [
self._combine(x, y, filter)
for x, y in zip(self.committed, self.attempted)
]
return {
self.COUNTERS: counters,
self.DISTRIBUTIONS: distributions,
self.GAUGES: gauges
}
class PipelineResult(runner.PipelineResult):
def __init__(
self,
job_service,
job_id,
message_stream,
state_stream,
cleanup_callbacks=()):
super().__init__(beam_job_api_pb2.JobState.UNSPECIFIED)
self._job_service = job_service
self._job_id = job_id
self._messages = []
self._message_stream = message_stream
self._state_stream = state_stream
self._cleanup_callbacks = cleanup_callbacks
self._metrics = None
self._runtime_exception = None
def cancel(self):
# type: () -> None
try:
self._job_service.Cancel(
beam_job_api_pb2.CancelJobRequest(job_id=self._job_id))
finally:
self._cleanup()
@property
def state(self):
runner_api_state = self._job_service.GetState(
beam_job_api_pb2.GetJobStateRequest(job_id=self._job_id)).state
self._state = self._runner_api_state_to_pipeline_state(runner_api_state)
return self._state
@staticmethod
def _runner_api_state_to_pipeline_state(runner_api_state):
return getattr(
runner.PipelineState,
beam_job_api_pb2.JobState.Enum.Name(runner_api_state))
@staticmethod
def _pipeline_state_to_runner_api_state(pipeline_state):
return beam_job_api_pb2.JobState.Enum.Value(pipeline_state)
def metrics(self):
if not self._metrics:
job_metrics_response = self._job_service.GetJobMetrics(
beam_job_api_pb2.GetJobMetricsRequest(job_id=self._job_id))
self._metrics = PortableMetrics(job_metrics_response)
return self._metrics
def _last_error_message(self):
# type: () -> str
# Filter only messages with the "message_response" and error messages.
messages = [
m.message_response for m in self._messages
if m.HasField('message_response')
]
error_messages = [
m for m in messages
if m.importance == beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR
]
if error_messages:
return error_messages[-1].message_text
else:
return 'unknown error'
def wait_until_finish(self, duration=None):
"""
:param duration: The maximum time in milliseconds to wait for the result of
the execution. If None or zero, will wait until the pipeline finishes.
:return: The final state of the pipeline run.
"""
def read_messages():
# type: () -> None
previous_state = -1
for message in self._message_stream:
if message.HasField('message_response'):
logging.log(
MESSAGE_LOG_LEVELS[message.message_response.importance],
"%s",
message.message_response.message_text)
else:
current_state = message.state_response.state
if current_state != previous_state:
_LOGGER.info(
"Job state changed to %s",
self._runner_api_state_to_pipeline_state(current_state))
previous_state = current_state
self._messages.append(message)
message_thread = threading.Thread(
target=read_messages, name='wait_until_finish_read')
message_thread.daemon = True
message_thread.start()
if duration:
state_thread = threading.Thread(
target=functools.partial(self._observe_state, message_thread),
name='wait_until_finish_state_observer')
state_thread.daemon = True
state_thread.start()
start_time = time.time()
duration_secs = duration / 1000
while (time.time() - start_time < duration_secs and
state_thread.is_alive()):
time.sleep(1)
else:
self._observe_state(message_thread)
if self._runtime_exception:
raise self._runtime_exception
return self._state
def _observe_state(self, message_thread):
try:
for state_response in self._state_stream:
self._state = self._runner_api_state_to_pipeline_state(
state_response.state)
if state_response.state in TERMINAL_STATES:
# Wait for any last messages.
message_thread.join(10)
break
if self._state != runner.PipelineState.DONE:
self._runtime_exception = RuntimeError(
'Pipeline %s failed in state %s: %s' %
(self._job_id, self._state, self._last_error_message()))
except Exception as e:
self._runtime_exception = e
finally:
self._cleanup()
def _cleanup(self, on_exit=False):
# type: (bool) -> None
if on_exit and self._cleanup_callbacks:
_LOGGER.info(
'Running cleanup on exit. If your pipeline should continue running, '
'be sure to use the following syntax:\n'
' with Pipeline() as p:\n'
' p.apply(..)\n'
'This ensures that the pipeline finishes before this program exits.')
callback_exceptions = []
for callback in self._cleanup_callbacks:
try:
callback()
except Exception as e:
callback_exceptions.append(e)
self._cleanup_callbacks = ()
if callback_exceptions:
formatted_exceptions = ''.join(
[f"\n\t{repr(e)}" for e in callback_exceptions])
raise RuntimeError('Errors: {}'.format(formatted_exceptions))
|
PypiClean
|
/batchspawner-1.2.0.tar.gz/batchspawner-1.2.0/SPAWNERS.md
|
# Notes on specific spawners
**Spawner maintainers**: Included below are "spawner maintainers",
when available. There aren't official obligations, but the general
idea is that you should watch the repository and feel especially
empowered to comment on issues when you think it might be relevant to
you (obviously everyone should be, but this is our attempt at even
more outreach). You should let us know when we break something and
provide a diversity of opinions in general. Submitting PRs and
testing is nice but not required.
To be listed as a maintainer, just submit an issue or PR adding yourself,
and please watch the repository on GitHub.
## `TorqueSpawner`
Maintainers:
## `MoabSpawner`
Subclass of TorqueSpawner
Maintainers:
## `SlurmSpawner`
Maintainers: @rkdarst
This spawner enforces the environment if `srun` is used to wrap the
spawner command, which is the default. If you _do_ want user
environment to be used, set `req_srun=''`. However, this is not
perfect: a bash shell is still started as the user, which could run
arbitrary startup code, define shell aliases for `srun`, etc.
Use of `srun` is required to gracefully terminate.
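For example, a minimal `jupyterhub_config.py` sketch that disables the `srun`
wrapper (assuming the standard traitlets configuration syntax):
```python
c.JupyterHub.spawner_class = "batchspawner.SlurmSpawner"
c.SlurmSpawner.req_srun = ""  # run the single-user server without srun
```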
## `GridengineSpawner`
Maintainers:
## `CondorSpawner`
Maintainers:
## `LsfSpawner`
Maintainers:
# Checklist for making spawners
Please document each of these things under the spawner list above.
Even if the answer is "OK", we need to track the status of all spawners.
If something is a bug, users really need to know.
- Does your spawner read the shell environment before starting? (See
[Jupyterhub
Security](https://jupyterhub.readthedocs.io/en/stable/reference/websecurity.html).)
- Does your spawner send SIGTERM to the jupyterhub-singleuser process
before SIGKILL? It should, so that the process can terminate
gracefully. Add `echo "terminated gracefully"` to the end of the
batch script - if you see this in your singleuser server output, you
know that you DO receive SIGTERM and terminate gracefully. If your
batch system cannot automatically send SIGTERM before SIGKILL, PR
#75 might help here; ask for it to be finished.
|
PypiClean
|
/qumulo_api-6.2.1-py3-none-any.whl/qumulo/lib/util.py
|
import os.path
import re
from contextlib import contextmanager
from typing import (
Callable,
cast,
Dict,
IO,
Iterable,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Union,
)
from typing_extensions import Literal
def get_bytes(byte_string: str) -> int:
symbol = {
'KB': 10 ** 3,
'KiB': 2 ** 10,
'MB': 10 ** 6,
'MiB': 2 ** 20,
'GB': 10 ** 9,
'GiB': 2 ** 30,
'TB': 10 ** 12,
'TiB': 2 ** 40,
'PB': 10 ** 15,
'PiB': 2 ** 50,
'EB': 10 ** 18,
'EiB': 2 ** 60,
}
if byte_string.isdigit():
return int(byte_string)
elif byte_string[-2:] in symbol:
return int(float(byte_string[0:-2]) * symbol[byte_string[-2:]])
elif byte_string[-3:] in symbol:
return int(float(byte_string[0:-3]) * symbol[byte_string[-3:]])
else:
raise ValueError('Limit format is not acceptable!')
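# Illustrative values (not from the original source):
#   get_bytes('4096')   -> 4096
#   get_bytes('1.5KB')  -> 1500
#   get_bytes('2MiB')   -> 2097152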
def humanize(num_bytes: Union[float, int]) -> str:
"""
Return a string representation of @p num_bytes, up to 1 decimal place. Units are in base 10
(i.e., no kibibytes, etc.).
humanize(0) --> "0.0 B"
humanize(999) --> "1.0 KB"
humanize(1000) --> "1.0 KB"
humanize(1100) --> "1.1 KB"
humanize(8 * 10**15) --> "8.0 PB"
humanize(1.1 * 10**18) --> "1.1 EB"
"""
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
# Cast to a float first, for division below and for consistency in output.
value = float(num_bytes)
for unit in units:
# Round up next_value to 1 when value > 950
next_value = round(value / 1000, 1)
if next_value < 1:
return f'{value:.1f} {unit}'
value = next_value
raise ValueError(f'No human-readable unit for {value} bytes')
def bool_from_string(value: str) -> bool:
value = value.lower()
if value in ['t', 'true', '1', 'yes', 'on', 'enabled']:
return True
if value in ['f', 'false', '0', 'no', 'off', 'disabled']:
return False
raise ValueError('Unable to convert "%s" to boolean' % value)
figlet_yes_or_no = """\
__ _______ ____ _ _ ___ ___
\\ \\ / / ____/ ___| | \\ | |/ _ \\__ \\
\\ V /| _| \\___ \\ | \\| | | | |/ /
| | | |___ ___) | | |\\ | |_| |_|
|_| |_____|____/ or |_| \\_|\\___/(_) """
def ask(prompt: str, inputter: Callable[[str], str] = input) -> str:
return inputter(f'{prompt} ').strip().lower()
def are_you_sure(inputter: Callable[[str], str] = input) -> bool:
prompts = ['yes or no?', 'Yes or No?', 'YES or NO?', figlet_yes_or_no]
times = 0
answer = ask(prompts[times], inputter=inputter)
while answer not in ('yes', 'no'):
times += 1
answer = ask(prompts[min(times, len(prompts) - 1)], inputter=inputter)
return answer == 'yes'
# Join two paths, force basename to be relative
def path_join(dirname: str, basename: str) -> str:
if basename.startswith('/'):
basename = basename[1:]
return f'{dirname}/{basename}'
# Emulate UNIX basename behavior: basename('/foo/bar/') => 'bar'
def unix_path_split(path: str) -> Tuple[str, str]:
dirname, basename = os.path.split(path)
if not basename:
dirname, basename = os.path.split(dirname)
return (dirname, basename)
CERT_RE = re.compile(
r'-----BEGIN CERTIFICATE-----\s+' + r'([\S\s]*)\s+' + r'-----END CERTIFICATE-----'
)
def get_certificate_from_pem_format_string(content: str) -> Optional[str]:
match = CERT_RE.search(content)
return match.group(1) if match else None
RSA_PRIV_KEY_RE = re.compile(
r'-----BEGIN RSA PRIVATE KEY-----\s+' + r'([\S\s]*)\s+' + r'-----END RSA PRIVATE KEY-----'
)
def get_rsa_private_key_from_pem_format_string(content: str) -> Optional[str]:
match = RSA_PRIV_KEY_RE.search(content)
return match.group(1) if match else None
def tabulate(
table: Sequence[Sequence[object]],
headers: Union[None, Literal['firstrow'], Sequence[object]] = (),
) -> str:
"""
Print a pretty table with fixed-width columns.
@p table A list of rows, which are also lists. All rows must have the same length.
@p headers A list of column header strings, or "firstrow" to use the first
row as column headers, or None to not print column headers.
This implements a subset of the functionality of the tabulate module that's in the toolchain.
It is re-implemented to avoid taking that dependency for our public CLI package.
"""
# Pull the header row out of the table, if it is integrated:
if isinstance(headers, str) and headers == 'firstrow':
headers = table[0]
table = table[1:]
# Find the width of each column
if headers:
col_widths = [max(len(str(h)), 1) for h in headers]
else:
col_count = len(table[0]) if table else 0
col_widths = [1] * col_count
for row in table:
assert len(row) == len(col_widths)
col_widths = [max(m, len(str(v))) for m, v in zip(col_widths, row)]
sep = ' '
lines = []
if headers:
line = sep.join('{:<{}}'.format(str(h), w) for h, w in zip(headers, col_widths))
lines.append(line.rstrip())
lines.append(sep.join('=' * w for w in col_widths))
for row in table:
line = sep.join('{:<{}}'.format(str(v), w) for v, w in zip(row, col_widths))
lines.append(line.rstrip())
return '\n'.join(lines)
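# Illustrative example (not from the original source):
#   tabulate([['name', 'size'], ['a.txt', '12'], ['b.txt', '3']],
#            headers='firstrow')
# returns:
#   name   size
#   =====  ====
#   a.txt  12
#   b.txt  3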
def edit_distance(str1: str, str2: str) -> int:
"""
Compute the Levenshtein distance between @p str1 and @p str2.
This is evolved from the Wagner-Fischer Dynamic Programming solution that has been posted in
multiple places all over Google.
"""
if len(str1) < len(str2):
str1, str2 = str2, str1
if len(str2) == 0:
return len(str1)
# DP Algo: You build an NxM matrix where
# N := len(str1)+1
# M := len(str2)+1.
# Note that N and M are 1 larger than the string lengths because we need a
# row and column in the matrix to represent the empty string on either
# side of the computation. Each cell will represent the minimum edit
# distance to get to a substring of str1 and str2 of lengths i and j
# where 0 <= i <= len(str1) and 0 <= j <= len(str2).
# ex: str1 = foo, str2 = boo
# b o o
# +---------------+
# | 0 | 1 | 2 | 3 |
# f | 1 | 1 | 2 | 3 |
# o | 2 | 2 | 1 | 2 |
# o | 3 | 3 | 2 | 1 | <--- min distance is 1
# +---------------+
# first, we start by populating the first row that represents if str1 was
# empty string. Hence, each cell is just the number of inserts to get to
# str2. +1 required since range is a half-open interval.
prev_row = list(range(len(str2) + 1))
for i, char1 in enumerate(str1):
# This is the first column which represents if str2 was empty but str1
# wasn't.
cur_row = [i + 1]
for j, char2 in enumerate(str2):
insertions = prev_row[j + 1] + 1
deletions = cur_row[j] + 1
substitutions = prev_row[j]
if char1 != char2:
substitutions += 1
cur_row.append(min(insertions, deletions, substitutions))
prev_row = cur_row
return prev_row[-1]
# _____ _ _ _ _
# |_ _|____ _| |_ / \ | (_) __ _ _ __ ___ _ __
# | |/ _ \ \/ / __| / _ \ | | |/ _` | '_ \ / _ \ '__|
# | | __/> <| |_ / ___ \| | | (_| | | | | __/ |
# |_|\___/_/\_\\__/_/ \_\_|_|\__, |_| |_|\___|_|
# |___/
# FIGLET: TextAligner
#
SINGLE_LEVEL_INDENT = ' ' * 4
TextAlignerValueType = Union[int, float, bool, bytes, str, Sequence[str], None]
class TextAligner:
"""
Builds up a set of lines of text, with padding for named fields to have the
same width in all lines where that field occurs.
This makes it easy to build up complex text where there is interleaving of
lines with different alignment structure.
"""
def __init__(self, indent: Optional[str] = None, max_width: Optional[int] = None):
self.indent_lvl = 0
# Tracks the maximum length for each field.
self.max_lengths: Dict[str, int] = {}
# Lines to format. tuple(indent, fmt, positional_args, aligned_kwargs)
self.lines: List[Tuple[int, str, Sequence[object], Mapping[str, object]]] = []
# Custom format specs for aligned fields.
self.formats: Dict[str, str] = {}
# Maximum column width. Changes based on indent level.
# XXX jkong: make max_width more general
# Used in some methods to concatenate shorter lines together, but does not generally wrap
# longer lines added to the class.
self.max_width = 80 if max_width is None else max_width
# The string used to indent lines
self.indent_val = SINGLE_LEVEL_INDENT if indent is None else indent
@contextmanager
def indented(self) -> Iterator[None]:
"""
Increase the indent level by one for all lines added within this
context. This may be nested for multiple indentation levels.
"""
self.indent_lvl += 1
self.max_width -= len(self.indent_val)
yield
self.indent_lvl -= 1
self.max_width += len(self.indent_val)
def set_padding(self, **kwargs: str) -> None:
"""
Override the default format specification that controls how named fields are padded.
Each keyword argument names a field, and the value provides the "[[fill]align][sign][#][0]"
subsection of the python format string specification mini-language. By default, fields are
right-padded with spaces.
e.g. self.set_padding(foo="0>") will result in the foo field being left-padded with zeroes.
"""
self.formats.update(kwargs)
def add_line(self, fmt: str, *args: object, **kwargs: object) -> None:
"""
Add a line to the text.
@p fmt A format string for the line.
@p extra_indent if given, added to the current indent level
@p args positional arguments to the format string, which will not be padded.
These must be fixed width in order to be interleaved with named fields
without breaking alignment.
@p kwargs keyword arguments to the format string, which will be padded for alignment.
If the same format prefix always precedes a given field, that field will
always start at the same column.
"""
extra_indent = cast(int, kwargs.pop('extra_indent', 0))
self.lines.append((self.indent_lvl + extra_indent, fmt, args, kwargs))
for name, value in kwargs.items():
self.max_lengths[name] = max(len(str(value)), self.max_lengths.get(name, 0))
def add_lines(self, lines: Iterable[str], *args: object, **kwargs: object) -> None:
"""
Simple helper function to ergonomically add multiple lines at once.
@p lines An iterable containing lines to add
"""
for line in lines:
# NB: This will result in the same set of *args being passed in for each line.
self.add_line(line, *args, **kwargs)
def format_list(
self, items: Iterable[str], sep: Optional[str] = None, max_len: Optional[int] = None
) -> Iterator[str]:
"""
Concatenates fragments made from @p items to build lines of at most @p max_len characters.
Each individual item must be shorter than max_len.
"""
sep = sep or ', '
max_len = max_len or self.max_width
# Account for adding separator at the end of the line
max_len -= len(sep)
line = ''
for i, item in enumerate(items):
if i == 0:
line = str(item)
elif len(line + sep + item) <= max_len:
line += f'{sep}{item}'
else:
yield f'{line}{sep}'.rstrip()
line = str(item)
yield line
def add_concatenated_lines(self, items: Iterable[str]) -> None:
"""
Simple wrapper function that takes in a list and adds width-formatted lines.
"""
self.add_lines(self.format_list(items))
def add_wrapped_table(self, table: Iterable[Tuple[str, TextAlignerValueType]]) -> None:
"""
Takes a list of pairs (tuples) and adds lines for a two-column table with wrapped rows.
@p table The list of (k, v) pairs to be turned into a table.
Currently only supports lists, ints, bools, floats, and strings.
"""
# Find the maximum left column length in characters
max_col = max(len(a[0]) for a in table) + len(self.indent_val)
for k, v in table:
# Format the right column
max_len = self.max_width - max_col
if isinstance(v, (int, float, bool, bytes)) or v is None:
v = str(v)
if isinstance(v, str):
line_gen = self.format_list(v.split(' '), sep=' ', max_len=max_len)
else:
line_gen = self.format_list(v, max_len=max_len)
lines = list(line_gen)
# Prepend the first line of values with the key, and then any
# following lines with the appropriate number of spaces
pad = ' ' * (max_col - len(k))
self.add_line(k + pad + lines[0])
for line in lines[1:]:
self.add_line((' ' * max_col) + line)
def write(self, outfile: IO[str]) -> None:
for indent, fmt, positional, aligned in self.lines:
# Pad all the aligned fields to the observed max width:
padded = {
# NB: str(v) to avoid format trying to get clever based on type, e.g. formatting
# True as "1"
t: '{:{f}{w}}'.format(str(v), w=self.max_lengths[t], f=self.formats.get(t, '<'))
for t, v in aligned.items()
}
outfile.write(str(self.indent_val * indent))
outfile.write(str(fmt.format(*positional, **padded)))
outfile.write('\n')
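# Hypothetical usage sketch (not part of the original module):
#   import sys
#   aligner = TextAligner()
#   aligner.add_line('{name} = {value}', name='id', value=1)
#   aligner.add_line('{name} = {value}', name='display_name', value='foo')
#   aligner.write(sys.stdout)
# Both '=' signs end up in the same column because the 'name' field is padded
# to the width of its longest occurrence ('display_name').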
|
PypiClean
|
/apache_superset_iteco-2.1.1.4-py3-none-any.whl/superset/migrations/versions/2018-07-19_23-41_c617da68de7d_form_nullable.py
|
# revision identifiers, used by Alembic.
revision = "c617da68de7d"
down_revision = "18dc26817ad2"
from alembic import op
from sqlalchemy import Column, Integer, String, Text
from sqlalchemy.ext.declarative import declarative_base
from superset import db
from superset.utils.core import MediumText
Base = declarative_base()
class BaseColumnMixin:
id = Column(Integer, primary_key=True)
column_name = Column(String(255))
description = Column(Text)
type = Column(String(32))
verbose_name = Column(String(1024))
class BaseDatasourceMixin:
id = Column(Integer, primary_key=True)
description = Column(Text)
class BaseMetricMixin:
id = Column(Integer, primary_key=True)
d3format = Column(String(128))
description = Column(Text)
metric_name = Column(String(512))
metric_type = Column(String(32))
verbose_name = Column(String(1024))
warning_text = Column(Text)
class Annotation(Base):
__tablename__ = "annotation"
id = Column(Integer, primary_key=True)
long_descr = Column(Text)
json_metadata = Column(Text)
short_descr = Column(String(500))
class Dashboard(Base):
__tablename__ = "dashboards"
id = Column(Integer, primary_key=True)
css = Column(Text)
dashboard_title = Column(String(500))
description = Column(Text)
json_metadata = Column(Text)
position_json = Column(MediumText())
slug = Column(String(255))
class Database(Base):
__tablename__ = "dbs"
id = Column(Integer, primary_key=True)
database_name = Column(String(250))
extra = Column(Text)
force_ctas_schema = Column(String(250))
sqlalchemy_uri = Column(String(1024))
verbose_name = Column(String(250))
class DruidCluster(Base):
__tablename__ = "clusters"
id = Column(Integer, primary_key=True)
broker_host = Column(String(255))
broker_endpoint = Column(String(255))
cluster_name = Column(String(250))
verbose_name = Column(String(250))
class DruidColumn(BaseColumnMixin, Base):
__tablename__ = "columns"
dimension_spec_json = Column(Text)
class DruidDatasource(BaseDatasourceMixin, Base):
__tablename__ = "datasources"
datasource_name = Column(String(255))
default_endpoint = Column(Text)
fetch_values_from = Column(String(100))
class DruidMetric(BaseMetricMixin, Base):
__tablename__ = "metrics"
json = Column(Text)
class Slice(Base):
__tablename__ = "slices"
id = Column(Integer, primary_key=True)
description = Column(Text)
params = Column(Text)
slice_name = Column(String(250))
viz_type = Column(String(250))
class SqlaTable(BaseDatasourceMixin, Base):
__tablename__ = "tables"
default_endpoint = Column(MediumText())
fetch_values_predicate = Column(String(1000))
main_dttm_col = Column(String(250))
schema = Column(String(255))
sql = Column(Text)
table_name = Column(String(250))
template_params = Column(Text)
class SqlMetric(BaseMetricMixin, Base):
__tablename__ = "sql_metrics"
expression = Column(Text)
class TableColumn(BaseColumnMixin, Base):
__tablename__ = "table_columns"
database_expression = Column(String(255))
expression = Column(Text)
python_date_format = Column(String(255))
def upgrade():
bind = op.get_bind()
session = db.Session(bind=bind)
tables = [
Annotation,
Dashboard,
Database,
DruidCluster,
DruidColumn,
DruidDatasource,
DruidMetric,
Slice,
SqlaTable,
SqlMetric,
TableColumn,
]
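# Normalize legacy rows: replace any empty-string value in a non-primary-key
# column of the models above with None (NULL).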
for table in tables:
for record in session.query(table).all():
for col in record.__table__.columns.values():
if not col.primary_key:
if getattr(record, col.name) == "":
setattr(record, col.name, None)
session.commit()
session.close()
def downgrade():
pass
|
PypiClean
|
/openpmd-beamphysics-0.8.0.tar.gz/openpmd-beamphysics-0.8.0/pmd_beamphysics/readers.py
|
from .units import dimension, dimension_name, SI_symbol, pg_units, c_light, e_charge
from .tools import decode_attrs, decode_attr
import h5py
import numpy as np
#-----------------------------------------
# General Utilities
#-----------------------------------------
# Records, components, units
particle_record_components = {
'branchIndex':None,
'chargeState':None,
'electricField':['x', 'y', 'z'],
'elementIndex':None,
'magneticField':['x', 'y', 'z'],
'locationInElement': None,
'momentum':['x', 'y', 'z'],
'momentumOffset':['x', 'y', 'z'],
'photonPolarizationAmplitude':['x', 'y'],
'photonPolarizationPhase':['x', 'y'],
'sPosition':None,
'totalMomentum':None,
'totalMomentumOffset':None,
#'particleCoordinatesToGlobalTransformation': ??
'particleStatus':None,
'pathLength':None,
'position':['x', 'y', 'z'],
'positionOffset':['x', 'y', 'z'],
'spin':['x', 'y', 'z', 'theta', 'phi', 'psi'],
'time':None,
'timeOffset':None,
'velocity':['x', 'y', 'z'],
'weight':None
}
field_record_components = {
'electricField':['x', 'y', 'z', 'r', 'theta'],
'magneticField':['x', 'y', 'z', 'r', 'theta']
}
# Expected unit dimensions for particle and field records
expected_record_unit_dimension = {
'branchIndex':dimension('1'),
'chargeState':dimension('1'),
'electricField':dimension('electric_field'),
'magneticField':dimension('magnetic_field'),
'elementIndex':dimension('1'),
'locationInElement': dimension('1'),
'momentum':dimension('momentum'),
'momentumOffset':dimension('momentum'),
'photonPolarizationAmplitude':dimension('electric_field'),
'photonPolarizationPhase':dimension('1'),
'sPosition':dimension('length'),
'totalMomentum':dimension('momentum'),
'totalMomentumOffset':dimension('momentum'),
#'particleCoordinatesToGlobalTransformation': ??
'particleStatus':dimension('1'),
'pathLength':dimension('length'),
'position':dimension('length'),
'positionOffset':dimension('length'),
'spin':dimension('1'),
'time':dimension('time'),
'timeOffset':dimension('time'),
'velocity':dimension('velocity'),
'weight':dimension('charge')
}
# Convenient aliases for components
component_from_alias = {
# 'x':'position/x',
# 'y':'position/y',
# 'z':'position/z',
# 'px':'momentum/x',
# 'py':'momentum/y',
# 'pz':'momentum/z',
't':'time',
'weight':'weight',
'status':'particleStatus'
}
# Aliases for particles and fields
for g, prefix in zip(['position', 'momentum', 'electricField', 'magneticField'],
['', 'p', 'E', 'B']):
for c in ['x', 'y', 'z', 'r', 'theta']:
alias = prefix+c
component_from_alias[alias] = g+'/'+c
# Inverse
component_alias = {v:k for k,v in component_from_alias.items()}
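# A few of the aliases generated above, for illustration:
#   'x'      -> 'position/x'
#   'px'     -> 'momentum/x'
#   'Ez'     -> 'electricField/z'
#   'Btheta' -> 'magneticField/theta'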
def particle_paths(h5, key='particlesPath'):
"""
Uses the basePath and particlesPath to find where openPMD particles should be
"""
basePath = h5.attrs['basePath'].decode('utf-8')
particlesPath = h5.attrs[key].decode('utf-8')
if '%T' not in basePath:
return [basePath+particlesPath]
path1, path2 = basePath.split('%T')
tlist = list(h5[path1])
paths = [path1+t+path2+particlesPath for t in tlist]
return paths
def field_paths(h5, key='externalFieldPath'):
"""
Looks for the External Fields
"""
if key not in h5.attrs:
return []
fpath = h5.attrs[key].decode('utf-8')
if '%T' not in fpath:
return [fpath]
path1 = fpath.split('%T')[0]
tlist = list(h5[path1])
paths = [path1+t for t in tlist]
return paths
def is_constant_component(h5):
"""
Constant record component should have 'value' and 'shape'
"""
return 'value' in h5.attrs and 'shape' in h5.attrs
def constant_component_value(h5):
"""
Constant record component should have 'value' and 'shape'
"""
unitSI = h5.attrs['unitSI']
val = h5.attrs['value']
if unitSI == 1.0:
return val
else:
return val*unitSI
def component_unit_dimension(h5):
"""
Return the unit dimension tuple
"""
return tuple(h5.attrs['unitDimension'])
def component_data(h5, slice = slice(None), unit_factor=1):
"""
Returns a numpy array from an h5 component.
Determines whether a component has constant data or array data, and returns it.
An optional slice allows parts of the array to be retrieved.
This checks for a gridDataOrder attribute: F or C. If F, the np array is transposed.
Unit factor is an additional factor to convert from SI units to output units.
"""
# look for unitSI factor.
if 'unitSI' in h5.attrs:
factor = h5.attrs['unitSI']
else:
factor = 1
# Additional conversion factor
if unit_factor:
factor *= unit_factor
if is_constant_component(h5):
dat = np.full(h5.attrs['shape'], h5.attrs['value'])[slice]
# Check multidimensional for data ordering
elif len(h5.shape) > 1:
# Check for Fortran order
if 'gridDataOrder' in h5.attrs and decode_attr(h5.attrs['gridDataOrder'])=='F':
if isinstance(slice, tuple):
# Need to transpose the slice ordering
slice = slice[::-1]
# Retrieve dataset and transpose for C order
dat = h5[slice]
dat = np.transpose(dat)
else:
# C-order
dat = h5[slice]
# 1-D array
else:
dat = h5[slice]
if factor != 1:
dat *= factor
return dat
def offset_component_name(component_name):
"""
Many components can also have an offset, as in:
position/x
positionOffset/x
Return the appropriate name.
"""
x = component_name.split('/')
if len(x) == 1:
return x[0]+'Offset'
else:
return x[0]+'Offset/'+x[1]
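# For illustration:
#   offset_component_name('position/x') -> 'positionOffset/x'
#   offset_component_name('time')       -> 'timeOffset'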
def particle_array(h5, component, slice=slice(None), include_offset=True):
"""
Main routine to return particle arrays in fixed units.
All units are SI except momentum, which will be in eV/c.
Example:
particle_array(h5['data/00001/particles/'], 'px')
Will return the momentum/x + momentumOffset/x in eV/c.
"""
# Handle aliases
if component in component_from_alias:
component = component_from_alias[component]
if component in ['momentum/x', 'momentum/y', 'momentum/z']:
unit_factor = (c_light / e_charge ) # convert J/(m/s) to eV/c
else:
unit_factor = 1.0
# Get data
dat = component_data(h5[component], slice = slice, unit_factor=unit_factor)
# Look for offset component
ocomponent = offset_component_name(component)
if include_offset and ocomponent in h5 :
offset = component_data(h5[ocomponent], slice = slice, unit_factor=unit_factor)
dat += offset
return dat
def all_components(h5):
"""
Look for possible components in a particle group
"""
components = []
for record_name in h5:
if record_name not in particle_record_components:
continue
# Look for components
possible_components = particle_record_components[record_name]
if not possible_components:
# Record is a component
components.append(record_name)
else:
g = h5[record_name]
for cname in possible_components:
if cname in g:
components.append(record_name+'/'+cname)
return components
def component_str(particle_group, name):
"""
Informational string from a component in a particle group (h5)
"""
g = particle_group[name]
record_name = name.split('/')[0]
expected_dimension = expected_record_unit_dimension[record_name]
this_dimension = component_unit_dimension(g)
dname = dimension_name(this_dimension)
symbol = SI_symbol[dname]
s = name+' '
if is_constant_component(g):
val = constant_component_value(g)
shape = g.attrs['shape']
s += f'[constant {val} with shape {shape}]'
else:
s += '['+str(len(g))+' items]'
if symbol != '1':
s += f' is a {dname} with units: {symbol}'
if expected_dimension != this_dimension:
s +=', but expected units: '+ SI_symbol[dimension_name(expected_dimension)]
return s
#----------------------------------
# Fields
required_field_attrs = [
# strings
'eleAnchorPt', 'gridGeometry', 'axisLabels',
# reals and ints
'gridLowerBound', 'gridOriginOffset', 'gridSpacing', 'gridSize', 'harmonic'
]
# Dict with options
optional_field_attrs = {
'name':None,
'gridCurvatureRadius':None,
'fundamentalFrequency':0,
'RFphase':0,
'fieldScale':1.0,
'masterParameter':None
}
def load_field_attrs(attr, verbose=False):
"""
Loads FieldMesh required and optional attributes from a dict_like object.
Non-standard attributes will be collected in an 'other' dict.
Returns dicts:
attrs, other
"""
# Get all attrs. Will pop.
a = dict(attr)
attrs = {}
other = {}
# Required
for k in required_field_attrs:
attrs[k] = a.pop(k)
# Optional, filling in some defaults
for k in optional_field_attrs:
if k in a:
attrs[k] = a.pop(k)
else:
v = optional_field_attrs[k]
if v is not None:
attrs[k] = v
# Collect other.
for k, v in a.items():
other[k]= v
if verbose:
print('Nonstandard attr:', k, v)
# Decode
attrs = decode_attrs(attrs)
# Error checking
#if attrs['harmonic'] > 0:
# assert 'fundamentalFrequency' in attrs, 'fundamentalFrequency required if harmonic > 0'
return attrs, other
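# Minimal usage sketch (not part of the original module; the attribute values
# below are illustrative placeholders, and decode_attrs is assumed to accept them):
#   example_attrs = {
#       'eleAnchorPt': 'beginning', 'gridGeometry': 'rectangular',
#       'axisLabels': ('x', 'y', 'z'),
#       'gridLowerBound': (0, 0, 0), 'gridOriginOffset': (0.0, 0.0, 0.0),
#       'gridSpacing': (0.001, 0.001, 0.001), 'gridSize': (11, 11, 11),
#       'harmonic': 0,
#       'vendorNote': 'anything not in the standard lists ends up in `other`',
#   }
#   attrs, other = load_field_attrs(example_attrs, verbose=True)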
|
PypiClean
|
/xrpl_py-2.4.0b0-py3-none-any.whl/xrpl/models/transactions/xchain_create_bridge.py
|
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Dict, Optional
from xrpl.models.currencies import XRP
from xrpl.models.required import REQUIRED
from xrpl.models.transactions.transaction import Transaction
from xrpl.models.transactions.types import TransactionType
from xrpl.models.utils import require_kwargs_on_init
from xrpl.models.xchain_bridge import XChainBridge
@require_kwargs_on_init
@dataclass(frozen=True)
class XChainCreateBridge(Transaction):
"""
Represents a XChainCreateBridge transaction.
The XChainCreateBridge transaction creates a new `Bridge` ledger object and
defines a new cross-chain bridge entrance on the chain that the transaction
is submitted on. It includes information about door accounts and assets for
the bridge.
"""
xchain_bridge: XChainBridge = REQUIRED # type: ignore
"""
The bridge (door accounts and assets) to create. This field is required.
:meta hide-value:
"""
signature_reward: str = REQUIRED # type: ignore
"""
The total amount to pay the witness servers for their signatures. This
amount will be split among the signers. This field is required.
:meta hide-value:
"""
min_account_create_amount: Optional[str] = None
"""
The minimum amount, in XRP, required for a ``XChainAccountCreateCommit``
transaction. If this isn't present, the ``XChainAccountCreateCommit``
transaction will fail. This field can only be present on XRP-XRP bridges.
:meta hide-value:
"""
transaction_type: TransactionType = field(
default=TransactionType.XCHAIN_CREATE_BRIDGE,
init=False,
)
def _get_errors(self: XChainCreateBridge) -> Dict[str, str]:
errors = super()._get_errors()
bridge = self.xchain_bridge
if bridge.locking_chain_door == bridge.issuing_chain_door:
errors[
"xchain_bridge"
] = "Cannot have the same door accounts on the locking and issuing chain."
if self.account not in [bridge.locking_chain_door, bridge.issuing_chain_door]:
errors[
"account"
] = "Account must be either locking chain door or issuing chain door."
if (bridge.locking_chain_issue == XRP()) != (
bridge.issuing_chain_issue == XRP()
):
errors["issue"] = "Bridge must be XRP-XRP or IOU-IOU."
if (
self.min_account_create_amount is not None
and bridge.locking_chain_issue != XRP()
):
errors[
"min_account_create_amount"
] = "Cannot have MinAccountCreateAmount if bridge is IOU-IOU."
if self.signature_reward != REQUIRED and not self.signature_reward.isnumeric():
errors["signature_reward"] = "signature_reward must be numeric."
if (
self.min_account_create_amount is not None
and not self.min_account_create_amount.isnumeric()
):
errors[
"min_account_create_amount_value"
] = "min_account_create_amount must be numeric."
return errors
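# Hedged usage sketch (not part of the original module): the door accounts and
# amounts below are placeholders, chosen so that _get_errors() passes
# (distinct door accounts, XRP-XRP issues, numeric amounts, account is a door).
#   bridge = XChainBridge(
#       locking_chain_door="rLockingDoorPlaceholder",
#       locking_chain_issue=XRP(),
#       issuing_chain_door="rIssuingDoorPlaceholder",
#       issuing_chain_issue=XRP(),
#   )
#   tx = XChainCreateBridge(
#       account="rLockingDoorPlaceholder",
#       xchain_bridge=bridge,
#       signature_reward="200",
#       min_account_create_amount="10000000",
#   )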
|
PypiClean
|
/RegScale_CLI-5.15.0-py3-none-any.whl/regscale/integrations/commercial/gitlab.py
|
""" RegScale Gitlab integration"""
# standard python imports
import sys
import os
import re
import click
import requests
import markdown
from urllib.parse import urljoin
from regscale.core.app.logz import create_logger
from regscale.core.app.utils.app_utils import (
check_license,
create_progress_object,
error_and_exit,
get_current_datetime,
)
from regscale.core.app.internal.login import is_valid
from regscale.models import regscale_id, regscale_module
from regscale.models.regscale_models.issue import Issue
from regscale.models.regscale_models.link import Link
job_progress = create_progress_object()
logger = create_logger()
@click.group()
def gitlab():
"""GitLab integration to pull issues via API."""
@gitlab.command(
name="sync_issues", help="Intgration to GitLab to sync issues into a module."
)
@regscale_id()
@regscale_module()
@click.option(
"--gitlab_url", "-u", default="https://gitlab.com", help="GitLab URL", required=True
)
@click.option(
"--gitlab_project_id",
"-gpid",
required=True,
help="The ID of the GitLab project to pull issues from.",
default=os.environ.get("GITLAB_PROJECT"),
)
@click.option(
"--api_token",
"-t",
required=True,
help="Your GitLab API token with API read access.",
default=os.environ.get("GITLAB_API_TOKEN"),
)
@click.option(
"--include_links",
"-l",
is_flag=True,
help="Include links from the issue description.",
default=False,
)
def sync_issues(
regscale_id: int,
regscale_module: str,
gitlab_url: str,
gitlab_project_id: int,
api_token: str,
include_links: bool,
):
"""
Sync issues from a GitLab project into a module.
:param int regscale_id:
:param str regscale_module:
:param str gitlab_url:
:param int gitlab_project_id:
:param str api_token:
:param bool include_links:
:return: None
"""
run_sync_issues(
regscale_id=regscale_id,
regscale_module=regscale_module,
gitlab_url=gitlab_url,
gitlab_project_id=gitlab_project_id,
api_token=api_token,
include_links=include_links,
)
def run_sync_issues(
regscale_id: int,
regscale_module: str,
gitlab_url: str,
gitlab_project_id: int,
api_token: str,
include_links: bool,
) -> None:
"""
Sync issues from a GitLab project into a module.
:param int regscale_id:
:param str regscale_module:
:param str gitlab_url:
:param int gitlab_project_id:
:param str api_token:
:param bool include_links:
:return: None
"""
app = check_license()
if not is_valid(app=app):
logger.warn("RegScale token is invalid. please login.")
sys.exit(1)
with job_progress:
gitlab_issues = get_issues_from_gitlab(
gitlab_url, gitlab_project_id, api_token, job_progress
)
regscale_issues = get_regscale_issues(
regscale_id, regscale_module, job_progress
)
logger.debug(f"Fetched {len(regscale_issues)} issues from RegScale.")
# Convert the issues to your desired format
issues = convert_issues(
gitlab_issues,
regscale_id,
regscale_module,
include_links,
job_progress,
)
# Save or update the converted issues
save_or_update_issues(issues, regscale_issues, job_progress)
def get_regscale_issues(regscale_id: int, regscale_module: str, job_progress) -> list:
"""
Function to fetch issues from RegScale
:param int regscale_id:
:param str regscale_module:
:param job_progress:
:return: list of regscale issues
:rtype: list
"""
app = check_license()
task = job_progress.add_task("[#f8b737]Fetching issues from regscale", total=1)
if regscale_module == "securityplans":
existing_issues = Issue.fetch_issues_by_ssp(app=app, ssp_id=regscale_id)
logger.info(f"Fetched {len(existing_issues)} issues from RegScale by SSP.")
else:
existing_issues = Issue.fetch_issues_by_parent(
app=app, regscale_id=regscale_id, regscale_module=regscale_module
)
logger.info(
f"Fetched {len(existing_issues)} issues from RegScale by issue parent."
)
job_progress.update(task, advance=1)
return existing_issues
def save_or_update_issues(
gitlab_issues: list, regscale_issues: list, job_progress
) -> None:
"""
Function to save or update issues from GitLab to RegScale
:param list gitlab_issues:
:param list regscale_issues:
:param job_progress:
:return: None
:rtype: None
"""
app = check_license()
# figure out which issues need to be updated vs inserted
task = job_progress.add_task(
"[#f8b737]Saving issues from GitLab to RegScale...",
total=len(gitlab_issues),
)
regscale_dict = {
regscale_issue.dependabotId: regscale_issue
for regscale_issue in regscale_issues
}
for gitlab_issue_obj in gitlab_issues:
gitlab_issue = gitlab_issue_obj.get("issue")
# if we have the issue already in the regscale dict, check and update it
if gitlab_issue.dependabotId in regscale_dict:
regscale_issue = regscale_dict.get(gitlab_issue.dependabotId)
# has it updated?
if regscale_issue.__eq__(gitlab_issue) is False:
gitlab_issue.id = regscale_issue.id
try:
Issue.update_issue(app=app, issue=gitlab_issue)
logger.info(f"Updated issue {gitlab_issue.id}")
except Exception as vex:
logger.error(vex)
existing_links = Link.fetch_links_by_parent(
app, gitlab_issue.id, "issues"
)
for link in gitlab_issue_obj.get("links", []):
# set here for comparison purposes
link.parentID = gitlab_issue.id
if link not in existing_links:
try:
new_link = Link.insert_link(app=app, link=link)
logger.info(f"Inserted link {new_link.id}")
# Add the new link to the existing_links list
existing_links.append(new_link)
except Exception as ex:
logger.error(ex)
# insert new issue
else:
try:
issue = Issue.insert_issue(app=app, issue=gitlab_issue)
if issue is not None and issue.id is not None:
logger.info(f"Inserted issue {issue.id}")
except Exception as ex:
logger.error(ex)
for link in gitlab_issue_obj.get("links", []):
link.parentID = issue.id
try:
new_link = Link.insert_link(app=app, link=link)
except Exception as ex:
logger.error(ex)
job_progress.update(task, advance=1)
def extract_links_with_labels(
text: str, parent_id: int, parent_module: str
) -> list[Link]:
"""
Extract links from an issue description text with labels.
:param text: The issue description containing links.
:param parent_id: The parent ID associated with the parent module.
:param parent_module: The parent module associated with the parent ID.
:return: A list of Link objects extracted from the text.
"""
results = []
url_pattern = re.compile(
r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
)
for line in text.split("\n"):
if ":" in line and ("Link" in line or "link" in line):
label, url = line.split(":", 1)
url = url.strip().replace("<br>", "")
if url.startswith("https:"):
url = url[6:].strip()
if url_pattern.match(url):
results.append(
Link(
title=label.replace("-", "").strip(),
url=url,
parentID=parent_id,
parentModule=parent_module,
)
)
return results
def convert_issues(
gitlab_issues: list,
regscale_id: int,
regscale_module: str,
include_links: bool,
job_progress,
) -> list:
"""
Converts issues from GitLab to regscale
:param list gitlab_issues:
:param int regscale_id:
:param str regscale_module:
:param bool include_links:
:param job_progress:
"""
app = check_license()
task = job_progress.add_task(
"[#f8b737]Converting issues from gitlab...", total=len(gitlab_issues)
)
regscale_issues = []
for issue in gitlab_issues:
status = "Open"
if issue.get("state"):
if issue.get("state") == "open":
status = "Open"
elif issue.get("state") == "closed":
status = "Closed"
severity_level = Issue.assign_severity(issue.get("weight", 0))
# Convert the issue to your desired format
converted_issue = Issue(
title=issue["title"],
description=str(markdown.markdown(issue["description"])),
severityLevel=severity_level,
issueOwnerId=app.config["userId"],
costEstimate=0,
levelOfEffort=0,
dueDate=issue["due_date"],
identification="Other",
dependabotId=str(issue["id"]),
dateCreated=issue["created_at"],
parentId=regscale_id,
parentModule=regscale_module,
status=status,
securityPlanId=regscale_id if regscale_module == "securityplans" else None,
componentId=regscale_id if regscale_module == "components" else None,
)
if converted_issue.status == "Closed":
if issue.get("closed_at"):
converted_issue.dateCompleted = issue.get("closed_at")
else:
converted_issue.dateCompleted = get_current_datetime()
# Extract the links from the description
if include_links:
links = extract_links_with_labels(issue["description"], 0, "issues")
regscale_issues.append({"issue": converted_issue, "links": links})
else:
regscale_issues.append({"issue": converted_issue, "links": []})
job_progress.update(task, advance=1)
return regscale_issues
def get_issues_from_gitlab(
gitlab_url: str, gitlab_project_id: int, api_token: str, job_progress
) -> list:
"""
Fetch issues from GitLab
:param str gitlab_url:
:param int gitlab_project_id:
:param str api_token:
:param job_progress:
"""
# Define the GitLab API URL for issues
api_call = f"/api/v4/projects/{gitlab_project_id}/issues"
url = urljoin(gitlab_url, api_call)
logger.info("Fetching issues from gitlab...")
logger.debug(f"Fetching with API token {api_token}")
# Define the headers, including your API token
headers = {"Private-Token": api_token}
# Send a GET request to the API
fetching_issues = job_progress.add_task(
"[#f8b737]Fetching issues from gitlab...", total=1
)
response = requests.get(url, headers=headers)
job_progress.update(fetching_issues, advance=1)
issues = []
# If the request was successful
if response.ok:
# Load the issues from the response
issues = response.json()
logger.info(f"Fetched {len(issues)} issues from gitlab")
else:
logger.error(response.status_code)
logger.error(response.text)
error_and_exit(
f"Failed to get issues from GitLab. Status code: {response.status_code}"
)
return issues
|
PypiClean
|
/nertivia.py-0.2.9-py3-none-any.whl/engineioN/asyncio_client.py
|
import asyncio
import signal
import ssl
import threading
import aiohttp
from . import client
from . import exceptions
from . import packet
from . import payload
async_signal_handler_set = False
def async_signal_handler():
"""SIGINT handler.
Disconnect all active async clients.
"""
async def _handler():
asyncio.get_event_loop().stop()
for c in client.connected_clients[:]:
if c.is_asyncio_based():
await c.disconnect()
else: # pragma: no cover
pass
asyncio.ensure_future(_handler())
class AsyncClient(client.Client):
"""An Engine.IO client for asyncio.
This class implements a fully compliant Engine.IO web client with support
for websocket and long-polling transports, compatible with the asyncio
framework on Python 3.5 or newer.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``. Note that fatal errors are logged even when
``logger`` is ``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
:param request_timeout: A timeout in seconds for requests. The default is
5 seconds.
:param http_session: an initialized ``aiohttp.ClientSession`` object to be
used when sending requests to the server. Use it if
you need to add special client options such as proxy
servers, SSL certificates, etc.
:param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
skip SSL certificate verification, allowing
connections to servers with self signed certificates.
The default is ``True``.
"""
def is_asyncio_based(self):
return True
async def connect(self, url, headers=None, transports=None,
engineio_path='engine.io'):
"""Connect to an Engine.IO server.
:param url: The URL of the Engine.IO server. It can include custom
query string parameters if required by the server.
:param headers: A dictionary with custom headers to send with the
connection request.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. If not
given, the polling transport is connected first,
then an upgrade to websocket is attempted.
:param engineio_path: The endpoint where the Engine.IO server is
installed. The default value is appropriate for
most cases.
Note: this method is a coroutine.
Example usage::
eio = engineioN.Client()
await eio.connect('http://localhost:5000')
"""
global async_signal_handler_set
if not async_signal_handler_set and threading.current_thread() == threading.main_thread():
try:
asyncio.get_event_loop().add_signal_handler(
signal.SIGINT, async_signal_handler)
async_signal_handler_set = True
except NotImplementedError: # pragma: no cover
self.logger.warning('Signal handler is unsupported')
if self.state != 'disconnected':
raise ValueError('Client is not in a disconnected state')
valid_transports = ['polling', 'websocket']
if transports is not None:
if isinstance(transports, str):
transports = [transports]
transports = [transport for transport in transports
if transport in valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or valid_transports
self.queue = self.create_queue()
return await getattr(self, '_connect_' + self.transports[0])(
url, headers or {}, engineio_path)
async def wait(self):
"""Wait until the connection with the server ends.
Client applications can use this function to block the main thread
during the life of the connection.
Note: this method is a coroutine.
"""
if self.read_loop_task:
await self.read_loop_task
async def send(self, data):
"""Send a message to a client.
:param data: The data to send to the client. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
Note: this method is a coroutine.
"""
await self._send_packet(packet.Packet(packet.MESSAGE, data=data))
async def disconnect(self, abort=False):
"""Disconnect from the server.
:param abort: If set to ``True``, do not wait for background tasks
associated with the connection to end.
Note: this method is a coroutine.
"""
if self.state == 'connected':
await self._send_packet(packet.Packet(packet.CLOSE))
await self.queue.put(None)
self.state = 'disconnecting'
await self._trigger_event('disconnect', run_async=False)
if self.current_transport == 'websocket':
await self.ws.close()
if not abort:
await self.read_loop_task
self.state = 'disconnected'
try:
client.connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
def start_background_task(self, target, *args, **kwargs):
"""Start a background task.
This is a utility function that applications can use to start a
background task.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
Note: unlike most methods in this class, this one is not a coroutine; it
returns the scheduled task immediately.
"""
return asyncio.ensure_future(target(*args, **kwargs))
async def sleep(self, seconds=0):
"""Sleep for the requested amount of time.
Note: this method is a coroutine.
"""
return await asyncio.sleep(seconds)
def create_queue(self):
"""Create a queue object."""
q = asyncio.Queue()
q.Empty = asyncio.QueueEmpty
return q
def create_event(self):
"""Create an event object."""
return asyncio.Event()
def _reset(self):
if self.http: # pragma: no cover
asyncio.ensure_future(self.http.close())
super()._reset()
async def _connect_polling(self, url, headers, engineio_path):
"""Establish a long-polling connection to the Engine.IO server."""
if aiohttp is None: # pragma: no cover
self.logger.error('aiohttp not installed -- cannot make HTTP '
'requests!')
return
self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
self.logger.info('Attempting polling connection to ' + self.base_url)
r = await self._send_request(
'GET', self.base_url + self._get_url_timestamp(), headers=headers,
timeout=self.request_timeout)
if r is None:
self._reset()
raise exceptions.ConnectionError(
'Connection refused by the server')
if r.status < 200 or r.status >= 300:
self._reset()
try:
arg = await r.json()
except aiohttp.ClientError:
arg = None
raise exceptions.ConnectionError(
'Unexpected status code {} in server response'.format(
r.status), arg)
try:
p = payload.Payload(encoded_payload=(await r.read()).decode(
'utf-8'))
except ValueError:
raise exceptions.ConnectionError(
'Unexpected response from server') from None
open_packet = p.packets[0]
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError(
'OPEN packet not returned by server')
self.logger.info(
'Polling connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0
self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0
self.current_transport = 'polling'
self.base_url += '&sid=' + self.sid
self.state = 'connected'
client.connected_clients.append(self)
await self._trigger_event('connect', run_async=False)
for pkt in p.packets[1:]:
await self._receive_packet(pkt)
if 'websocket' in self.upgrades and 'websocket' in self.transports:
# attempt to upgrade to websocket
if await self._connect_websocket(url, headers, engineio_path):
# upgrade to websocket succeeded, we're done here
return
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_polling)
async def _connect_websocket(self, url, headers, engineio_path):
"""Establish or upgrade to a WebSocket connection with the server."""
if aiohttp is None: # pragma: no cover
self.logger.error('aiohttp package not installed')
return False
websocket_url = self._get_engineio_url(url, engineio_path,
'websocket')
if self.sid:
self.logger.info(
'Attempting WebSocket upgrade to ' + websocket_url)
upgrade = True
websocket_url += '&sid=' + self.sid
else:
upgrade = False
self.base_url = websocket_url
self.logger.info(
'Attempting WebSocket connection to ' + websocket_url)
if self.http is None or self.http.closed: # pragma: no cover
self.http = aiohttp.ClientSession()
# extract any new cookies passed in a header so that they can also be
# sent to the WebSocket route
cookies = {}
for header, value in headers.items():
if header.lower() == 'cookie':
cookies = dict(
[cookie.split('=', 1) for cookie in value.split('; ')])
del headers[header]
break
self.http.cookie_jar.update_cookies(cookies)
try:
if not self.ssl_verify:
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
ws = await self.http.ws_connect(
websocket_url + self._get_url_timestamp(),
headers=headers, ssl=ssl_context)
else:
ws = await self.http.ws_connect(
websocket_url + self._get_url_timestamp(),
headers=headers)
except (aiohttp.client_exceptions.WSServerHandshakeError,
aiohttp.client_exceptions.ServerConnectionError,
aiohttp.client_exceptions.ClientConnectionError):
if upgrade:
self.logger.warning(
'WebSocket upgrade failed: connection error')
return False
else:
raise exceptions.ConnectionError('Connection error')
if upgrade:
p = packet.Packet(packet.PING, data='probe').encode()
try:
await ws.send_str(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
try:
p = (await ws.receive()).data
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected recv exception: %s',
str(e))
return False
pkt = packet.Packet(encoded_packet=p)
if pkt.packet_type != packet.PONG or pkt.data != 'probe':
self.logger.warning(
'WebSocket upgrade failed: no PONG packet')
return False
p = packet.Packet(packet.UPGRADE).encode()
try:
await ws.send_str(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
self.current_transport = 'websocket'
self.logger.info('WebSocket upgrade was successful')
else:
try:
p = (await ws.receive()).data
except Exception as e: # pragma: no cover
raise exceptions.ConnectionError(
'Unexpected recv exception: ' + str(e))
open_packet = packet.Packet(encoded_packet=p)
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError('no OPEN packet')
self.logger.info(
'WebSocket connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0
self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0
self.current_transport = 'websocket'
self.state = 'connected'
client.connected_clients.append(self)
await self._trigger_event('connect', run_async=False)
self.ws = ws
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_websocket)
return True
async def _receive_packet(self, pkt):
"""Handle incoming packets from the server."""
packet_name = packet.packet_names[pkt.packet_type] \
if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
self.logger.info(
'Received packet %s data %s', packet_name,
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
if pkt.packet_type == packet.MESSAGE:
await self._trigger_event('message', pkt.data, run_async=True)
elif pkt.packet_type == packet.PING:
await self._send_packet(packet.Packet(packet.PONG, pkt.data))
elif pkt.packet_type == packet.CLOSE:
await self.disconnect(abort=True)
elif pkt.packet_type == packet.NOOP:
pass
else:
self.logger.error('Received unexpected packet of type %s',
pkt.packet_type)
async def _send_packet(self, pkt):
"""Queue a packet to be sent to the server."""
if self.state != 'connected':
return
await self.queue.put(pkt)
self.logger.info(
'Sending packet %s data %s',
packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
async def _send_request(
self, method, url, headers=None, body=None,
timeout=None): # pragma: no cover
if self.http is None or self.http.closed:
self.http = aiohttp.ClientSession()
http_method = getattr(self.http, method.lower())
try:
if not self.ssl_verify:
return await http_method(
url, headers=headers, data=body,
timeout=aiohttp.ClientTimeout(total=timeout), ssl=False)
else:
return await http_method(
url, headers=headers, data=body,
timeout=aiohttp.ClientTimeout(total=timeout))
except (aiohttp.ClientError, asyncio.TimeoutError) as exc:
self.logger.info('HTTP %s request to %s failed with error %s.',
method, url, exc)
async def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
ret = None
if event in self.handlers:
if asyncio.iscoroutinefunction(self.handlers[event]) is True:
if run_async:
return self.start_background_task(self.handlers[event],
*args)
else:
try:
ret = await self.handlers[event](*args)
except asyncio.CancelledError: # pragma: no cover
pass
except:
self.logger.exception(event + ' async handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
else:
if run_async:
async def async_handler():
return self.handlers[event](*args)
return self.start_background_task(async_handler)
else:
try:
ret = self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
return ret
async def _read_loop_polling(self):
"""Read packets by polling the Engine.IO server."""
while self.state == 'connected':
self.logger.info(
'Sending polling GET request to ' + self.base_url)
r = await self._send_request(
'GET', self.base_url + self._get_url_timestamp(),
timeout=max(self.ping_interval, self.ping_timeout) + 5)
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
await self.queue.put(None)
break
if r.status < 200 or r.status >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status)
await self.queue.put(None)
break
try:
p = payload.Payload(encoded_payload=(await r.read()).decode(
'utf-8'))
except ValueError:
self.logger.warning(
'Unexpected packet from server, aborting')
await self.queue.put(None)
break
for pkt in p.packets:
await self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
await self.write_loop_task
if self.state == 'connected':
await self._trigger_event('disconnect', run_async=False)
try:
client.connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
async def _read_loop_websocket(self):
"""Read packets from the Engine.IO WebSocket connection."""
while self.state == 'connected':
p = None
try:
p = await asyncio.wait_for(
self.ws.receive(),
timeout=self.ping_interval + self.ping_timeout)
p = p.data
if p is None: # pragma: no cover
await self.queue.put(None)
break # the connection is broken
except asyncio.TimeoutError:
self.logger.warning(
'Server has stopped communicating, aborting')
await self.queue.put(None)
break
except aiohttp.client_exceptions.ServerDisconnectedError:
self.logger.info(
'Read loop: WebSocket connection was closed, aborting')
await self.queue.put(None)
break
except Exception as e:
self.logger.info(
'Unexpected error receiving packet: "%s", aborting',
str(e))
await self.queue.put(None)
break
try:
pkt = packet.Packet(encoded_packet=p)
except Exception as e: # pragma: no cover
self.logger.info(
'Unexpected error decoding packet: "%s", aborting', str(e))
await self.queue.put(None)
break
await self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
await self.write_loop_task
if self.state == 'connected':
await self._trigger_event('disconnect', run_async=False)
try:
client.connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
async def _write_loop(self):
"""This background task sends packages to the server as they are
pushed to the send queue.
"""
while self.state == 'connected':
# to simplify the timeout handling, use the maximum of the
# ping interval and ping timeout as timeout, with an extra 5
# seconds grace period
timeout = max(self.ping_interval, self.ping_timeout) + 5
packets = None
try:
packets = [await asyncio.wait_for(self.queue.get(), timeout)]
except (self.queue.Empty, asyncio.TimeoutError,
asyncio.CancelledError):
self.logger.error('packet queue is empty, aborting')
break
if packets == [None]:
self.queue.task_done()
packets = []
else:
while True:
try:
packets.append(self.queue.get_nowait())
except self.queue.Empty:
break
if packets[-1] is None:
packets = packets[:-1]
self.queue.task_done()
break
if not packets:
# empty packet list returned -> connection closed
break
if self.current_transport == 'polling':
p = payload.Payload(packets=packets)
r = await self._send_request(
'POST', self.base_url, body=p.encode(),
headers={'Content-Type': 'application/octet-stream'},
timeout=self.request_timeout)
for pkt in packets:
self.queue.task_done()
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
break
if r.status < 200 or r.status >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status)
self._reset()
break
else:
# websocket
try:
for pkt in packets:
if pkt.binary:
await self.ws.send_bytes(pkt.encode())
else:
await self.ws.send_str(pkt.encode())
self.queue.task_done()
except (aiohttp.client_exceptions.ServerDisconnectedError,
BrokenPipeError, OSError):
self.logger.info(
'Write loop: WebSocket connection was closed, '
'aborting')
break
self.logger.info('Exiting write loop task')
|
PypiClean
|
/odoo_addon_l10n_nl_account_tax_unece-15.0.1.0.0.2-py3-none-any.whl/odoo/addons/l10n_nl_account_tax_unece/models/res_company.py
|
from odoo import api, models
MAPPING = {
"btw_0": {"categ": "tax_categ_z"},
"btw_6": {"categ": "tax_categ_aa"},
"btw_9": {"categ": "tax_categ_aa"},
"btw_21": {"categ": "tax_categ_h"},
"btw_overig": {"categ": "tax_categ_s"},
"btw_0_d": {"categ": "tax_categ_z"},
"btw_6_d": {"categ": "tax_categ_aa"},
"btw_9_d": {"categ": "tax_categ_aa"},
"btw_21_d": {"categ": "tax_categ_h"},
"btw_overig_d": {"categ": "tax_categ_s"},
"btw_6_buy": {"categ": "tax_categ_aa"},
"btw_6_buy_incl": {"categ": "tax_categ_aa"},
"btw_9_buy": {"categ": "tax_categ_aa"},
"btw_9_buy_incl": {"categ": "tax_categ_aa"},
"btw_21_buy": {"categ": "tax_categ_h"},
"btw_21_buy_incl": {"categ": "tax_categ_h"},
"btw_overig_buy": {"categ": "tax_categ_s"},
"btw_6_buy_d": {"categ": "tax_categ_aa"},
"btw_9_buy_d": {"categ": "tax_categ_aa"},
"btw_21_buy_d": {"categ": "tax_categ_h"},
"btw_overig_buy_d": {"categ": "tax_categ_s"},
"btw_verk_0": {"categ": "tax_categ_b"},
"btw_ink_0": {"categ": "tax_categ_b"},
"btw_I_6": {"categ": "tax_categ_aa"},
"btw_I_9": {"categ": "tax_categ_aa"},
"btw_I_21": {"categ": "tax_categ_h"},
"btw_I_overig": {"categ": "tax_categ_s"},
"btw_X0_producten": {"categ": "tax_categ_e"},
"btw_X0_diensten": {"categ": "tax_categ_e"},
"btw_X2": {"categ": "tax_categ_e"},
"btw_I_6_d": {"categ": "tax_categ_aa"},
"btw_I_9_d": {"categ": "tax_categ_aa"},
"btw_I_21_d": {"categ": "tax_categ_h"},
"btw_I_overig_d": {"categ": "tax_categ_s"},
"btw_E1": {"categ": "tax_categ_aa"},
"btw_E1_9": {"categ": "tax_categ_aa"},
"btw_E2": {"categ": "tax_categ_h"},
"btw_E_overig": {"categ": "tax_categ_s"},
"btw_X1": {"categ": "tax_categ_b"},
"btw_X3": {"categ": "tax_categ_b"},
"btw_E1_d": {"categ": "tax_categ_aa"},
"btw_E1_d_9": {"categ": "tax_categ_aa"},
"btw_E2_d": {"categ": "tax_categ_h"},
"btw_E_overig_d": {"categ": "tax_categ_s"},
}
class ResCompany(models.Model):
_inherit = "res.company"
def _l10n_nl_set_unece_on_taxes(self):
self.ensure_one()
taxes = self.env["account.tax"].search([("company_id", "=", self.id)])
ext_id_map = self._l10n_nl_get_external_tax_id_map(taxes)
for tax in taxes:
if tax.id in ext_id_map:
tax_categ = self._l10n_nl_get_tax_categ(ext_id_map, tax)
if tax_categ:
external_name = "account_tax_unece." + tax_categ
categ_id = self.env.ref(external_name).id or False
utype_id = self.env.ref("account_tax_unece.tax_type_vat").id
tax.write({"unece_type_id": utype_id, "unece_categ_id": categ_id})
@api.model
def _l10n_nl_get_tax_categ(self, ext_id_map, tax):
map_tax_index = str(ext_id_map[tax.id]).split("_", 1)[1]
return MAPPING.get(map_tax_index, {}).get("categ")
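# Illustration (assumption: chart-template taxes get ir.model.data names of the
# form '<company_id>_<template_xmlid>', e.g. '1_btw_21'): split("_", 1)[1]
# yields 'btw_21', and MAPPING['btw_21']['categ'] resolves to 'tax_categ_h'.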
@api.model
def _l10n_nl_get_external_tax_id_map(self, taxes):
tax_data = self.env["ir.model.data"].search_read(
[("model", "=", "account.tax"), ("res_id", "in", taxes.ids)],
["name", "res_id"],
)
res_map_dict = {}
for item in tax_data:
res_map_dict[item["res_id"]] = item["name"]
return res_map_dict
|
PypiClean
|
/python-pyparts-1.0.0.tar.gz/python-pyparts-1.0.0/src/pyparts/systems/temperature_controller.py
|
import time
from pyparts.logic import pid_controller
class TemperatureController(object):
"""A PID based temperature controller.
TemperatureController uses a PID controller to regulate temperature.
A temperature sensor is used to read the current temperature and a PWM based
heater is used to increase the temperature when needed. Once the desired
temperature is reached the heater turns off until the temperature falls below
the desired temperature.
Attributes:
_temp_sensor: TemperatureSensor. A temperature sensor to read temperature
values from.
_heater_pin: PwmOutput. A PWM output that controls a heating element.
_pid_worker: PIDController.Worker. Worker thread for maintaining a
temperature.
"""
# Error value at which PWM output will be set to 100%
MAX_ERROR_DEGREES_C = 10.0
def __init__(self, temp_sensor, heater_pin, kp, ki, kd):
"""Creates a TemperatureController.
Args:
temp_sensor: TemperatureSensor. A temperature sensor to read temperature.
heater_pin: PwmOutput. A PWM output that controls a heating element.
kp: Integer. PID controller constant term.
ki: Integer. PID controller integrator term.
kd: Integer. PID controller differentiator term.
"""
self._temp_sensor = temp_sensor
self._heater_pin = heater_pin
self._pid_worker = pid_controller.PIDController.Worker(
kp, ki, kd, self._pid_input_func, self._pid_output_func)
self._is_enabled = False
def _pid_input_func(self):
"""Input function to the PID controller."""
return self._temp_sensor.temp_c
def _pid_output_func(self, val):
"""Output function to handle PID controller error values.
Args:
val: Float. The output value from the PID controller.
"""
# If the sensor is too hot, turn off the heater
if val <= 0:
self._heater_pin.set_duty_cycle(0)
return
if val > self.MAX_ERROR_DEGREES_C:
self._heater_pin.set_duty_cycle(100)
return
self._heater_pin.set_duty_cycle(
(float(val) / self.MAX_ERROR_DEGREES_C) * 100)
time.sleep(1)
def set_temp_c(self, temp_c):
"""Set the desired temerature value.
Args:
temp_c: Integer. The temperature to target with the controller.
"""
self._pid_worker.desired_value = temp_c
@property
def temp_setting(self):
"""Get the current temperature set point."""
return self._pid_worker.desired_value
def enable(self):
"""Enable the temperature sensor and begin controlling the temperature."""
if not self._is_enabled:
self._pid_worker.start()
self._is_enabled = True
def disable(self):
"""Stops the temperature sensor and stops controlling the temperature."""
if self._is_enabled:
self._pid_worker.stop()
self._is_enabled = False
@property
def is_enabled(self):
"""Checks whether the temperature controller has been enabled or not.
Returns:
Boolean. True if the controller has been started.
"""
return self._is_enabled
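# Minimal usage sketch (not part of the original module): `sensor` and `pwm_pin`
# stand in for real TemperatureSensor and PwmOutput objects from pyparts.
#   controller = TemperatureController(temp_sensor=sensor, heater_pin=pwm_pin,
#                                      kp=2.0, ki=0.5, kd=0.1)
#   controller.set_temp_c(65)   # regulate toward 65 degrees C
#   controller.enable()
#   ...
#   controller.disable()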
|
PypiClean
|
/pointers.py-2.4.0-cp36-cp36m-macosx_10_9_x86_64.whl/pointers/base_pointers.py
|
import ctypes
import sys
import warnings
import weakref
from abc import ABC, abstractmethod
from contextlib import suppress
from typing import (
Any, Generic, Iterator, Optional, Tuple, Type, TypeVar, Union
)
from typing_extensions import final
from _pointers import add_ref, remove_ref
from ._utils import deref, force_set_attr, move_to_mem
from .exceptions import DereferenceError, FreedMemoryError, NullPointerError
from .util import NULL, Nullable, handle
__all__ = (
"BasePointer",
"BaseObjectPointer",
"BasicPointer",
"BaseCPointer",
"BaseAllocatedPointer",
"Dereferencable",
"IterDereferencable",
)
warnings.simplefilter("always", DeprecationWarning)
T = TypeVar("T")
A = TypeVar("A", bound="BasicPointer")
class BasicPointer(ABC):
"""Base class representing a pointer with no operations."""
@property
@abstractmethod
def address(self) -> Optional[int]:
"""Address that the pointer is looking at."""
...
@abstractmethod
def __repr__(self) -> str:
...
@final
def __str__(self) -> str:
return f"{type(self).__name__}({hex(self.address or 0)})"
@abstractmethod
def _cleanup(self) -> None:
...
@final
def __eq__(self, data: object) -> bool:
if not isinstance(data, BasePointer):
return False
return data.address == self.address
@final
def ensure(self) -> int:
"""Ensure that the pointer is not null.
Raises:
NullPointerError: Address of pointer is `None`
Returns:
Address of the pointer.
Example:
```py
ptr = to_ptr(NULL)
address = ptr.ensure() # NullPointerError
ptr >>= 1
address = ptr.ensure() # works just fine
```"""
if not self.address:
raise NullPointerError("pointer is NULL")
return self.address
class Movable(ABC, Generic[T, A]):
@abstractmethod
def move(
self,
data: Union[A, T],
*,
unsafe: bool = False,
) -> None:
...
def __ilshift__(self, data: Union[A, T]):
self.move(data)
return self
def __ixor__(self, data: Union[A, T]):
self.move(data, unsafe=True)
return self
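# Illustration (not in the original source): for a concrete pointer subclass,
# the operator overloads above are shorthand for move():
#   ptr <<= value   # equivalent to ptr.move(value)
#   ptr ^= value    # equivalent to ptr.move(value, unsafe=True)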
class Dereferencable(ABC, Generic[T]):
"""Abstract class for an object that may be dereferenced."""
@abstractmethod
def dereference(self) -> T:
"""Dereference the pointer.
Returns:
Value at the pointers address."""
...
@final
def __invert__(self) -> T:
"""Dereference the pointer."""
return self.dereference()
class IterDereferencable(Dereferencable[T], Generic[T]):
"""
Abstract class for an object that may be dereferenced via * (`__iter__`)
"""
def __iter__(self) -> Iterator[T]:
"""Dereference the pointer."""
return iter({self.dereference()})
class BasePointer(
Dereferencable[T],
Movable[T, "BasePointer[T]"],
BasicPointer,
ABC,
Generic[T],
):
"""Base class representing a pointer."""
@property
@abstractmethod
def address(self) -> Optional[int]:
"""Address that the pointer is looking at."""
...
@abstractmethod
def __repr__(self) -> str:
...
@abstractmethod
def _cleanup(self) -> None:
...
class Typed(ABC, Generic[T]):
"""Base class for a pointer that has a type attribute."""
@property
@abstractmethod
def type(self) -> T:
"""Type of the value at the address."""
...
class Sized(ABC):
"""Base class for a pointer that has a size attribute."""
@abstractmethod
def ensure(self) -> int:
...
@property
@abstractmethod
def size(self) -> int:
"""Size of the target value."""
...
@handle
@final
def make_ct_pointer(self) -> "ctypes._PointerLike":
"""Convert the address to a ctypes pointer.
Returns:
The created ctypes pointer.
"""
return ctypes.cast(
self.ensure(),
ctypes.POINTER(ctypes.c_char * self.size),
)
@abstractmethod
def _make_stream_and_ptr(
self,
size: int,
address: int,
) -> Tuple["ctypes._PointerLike", bytes]:
...
class BaseObjectPointer(
Typed[Type[T]],
IterDereferencable[T],
BasePointer[T],
ABC,
):
"""Abstract class for a pointer to a Python object."""
def __init__(
self,
address: Optional[int],
increment_ref: bool = False,
) -> None:
"""
Args:
address: Address of the underlying value.
increment_ref: Should the reference count on the target object get incremented.
""" # noqa
self._address: Optional[int] = address
if increment_ref and address:
add_ref(~self)
self._origin_size = sys.getsizeof(~self if address else None)
weakref.finalize(self, self._cleanup)
@property
def type(self) -> Type[T]:
warnings.warn(
"BaseObjectPointer.type is deprecated, please use type(~ptr) instead", # noqa
DeprecationWarning,
)
return type(~self)
@handle
def set_attr(self, key: str, value: Any) -> None:
v: Any = ~self # mypy gets angry if this isnt any
if not isinstance(~self, type):
v = type(v)
force_set_attr(v, key, value)
@handle
def assign(
self,
target: Nullable[Union["BaseObjectPointer[T]", T]],
) -> None:
"""Point to a new address.
Args:
target: New pointer or value to look at.
"""
if target is NULL:
self._address = None
return
new: BasePointer[T] = self._get_ptr(target) # type: ignore
if not isinstance(new, BaseObjectPointer):
raise ValueError(
"can only point to object pointer",
)
with suppress(NullPointerError):
remove_ref(~self)
self._address = new.address
add_ref(~self)
@property
def address(self) -> Optional[int]:
return self._address
@handle
def dereference(self) -> T:
return deref(self.ensure())
def __irshift__(
self,
value: Nullable[Union["BaseObjectPointer[T]", T]],
):
self.assign(value)
return self
@classmethod
@abstractmethod
def make_from(cls, obj: T) -> "BaseObjectPointer[T]":
"""Create a new instance of the pointer.
Args:
obj: Object to create pointer to.
Returns:
Created pointer.
Example:
```py
ptr = Pointer.make_from(1)
```"""
...
@classmethod
def _get_ptr(cls, obj: Union[T, "BasePointer[T]"]) -> "BasePointer[T]":
return (
obj
if isinstance(
obj,
BasePointer,
)
else cls.make_from(obj)
)
def _cleanup(self) -> None:
if self.address:
remove_ref(~self)
class BaseCPointer(
IterDereferencable[T],
Movable[T, "BaseCPointer[T]"],
BasicPointer,
Sized,
ABC,
):
def __init__(self, address: int, size: int):
self._address = address
self._size = size
weakref.finalize(self, self._cleanup)
@property
def address(self) -> Optional[int]:
return self._address
def _make_stream_and_ptr(
self,
size: int,
address: int,
) -> Tuple["ctypes._PointerLike", bytes]:
bytes_a = (ctypes.c_ubyte * size).from_address(address)
return self.make_ct_pointer(), bytes(bytes_a)
@handle
def move(
self,
data: Union["BaseCPointer[T]", T],
*,
unsafe: bool = False,
) -> None:
"""Move data to the target address."""
if not isinstance(data, BaseCPointer):
raise ValueError(
f'"{type(data).__name__}" object is not a valid C pointer',
)
ptr, byte_stream = self._make_stream_and_ptr(
data.size,
data.ensure(),
)
move_to_mem(ptr, byte_stream, unsafe=unsafe, target="C data")
def __ilshift__(self, data: Union["BaseCPointer[T]", T]):
self.move(data)
return self
def __ixor__(self, data: Union["BaseCPointer[T]", T]):
self.move(data, unsafe=True)
return self
@handle
def make_ct_pointer(self):
return ctypes.cast(
self.ensure(),
ctypes.POINTER(ctypes.c_char * self.size),
)
@abstractmethod
def _as_parameter_(self) -> "ctypes._CData":
"""Convert the pointer to a ctypes pointer."""
...
@abstractmethod
def _cleanup(self) -> None:
...
class BaseAllocatedPointer(BasePointer[T], Sized, ABC):
@property
@abstractmethod
def address(self) -> Optional[int]:
...
@address.setter
def address(self, value: int) -> None:
...
@property
def freed(self) -> bool:
"""Whether the allocated memory has been freed."""
return self._freed
@freed.setter
def freed(self, value: bool) -> None:
self._freed = value
@property
def assigned(self) -> bool:
"""Whether the allocated memory has been assigned a value."""
return self._assigned
@assigned.setter
def assigned(self, value: bool) -> None:
self._assigned = value
@handle
def move(
self,
data: Union[BasePointer[T], T],
unsafe: bool = False,
) -> None:
add_ref(data)
self.ensure_valid()
from .object_pointer import to_ptr
data_ptr = data if isinstance(data, BasePointer) else to_ptr(data)
ptr, byte_stream = self._make_stream_and_ptr(
sys.getsizeof(~data_ptr),
data_ptr.ensure(),
)
move_to_mem(ptr, byte_stream, unsafe=unsafe)
self.assigned = True
remove_ref(data)
@handle
def dereference(self) -> T:
if self.freed:
raise FreedMemoryError(
"cannot dereference memory that has been freed",
)
if not self.assigned:
raise DereferenceError(
"cannot dereference allocated memory that has no value",
)
return deref(self.ensure())
@abstractmethod
def __add__(self, amount: int) -> "BaseAllocatedPointer[Any]":
...
@abstractmethod
def __sub__(self, amount: int) -> "BaseAllocatedPointer[Any]":
...
def _cleanup(self) -> None:
pass
def _make_stream_and_ptr(
self,
size: int,
address: int,
) -> Tuple["ctypes._PointerLike", bytes]:
if self.freed:
raise FreedMemoryError("memory has been freed")
bytes_a = (ctypes.c_ubyte * size).from_address(address) # fmt: off
return self.make_ct_pointer(), bytes(bytes_a)
@abstractmethod
def free(self) -> None:
"""Free the memory."""
...
def ensure_valid(self) -> None:
"""Ensure the memory has not been freed."""
if self.freed:
raise FreedMemoryError(
f"{self} has been freed",
)
@property
def size(self) -> int:
return self._size
@size.setter
def size(self, value: int) -> None:
self._size = value
|
PypiClean
|
/btc-lib-0.0.2.tar.gz/btc-lib-0.0.2/README.md
|
btc-lib: Simple Bitcoin Library.
=======================
btc-lib is a simple Python cold-wallet management library for the Bitcoin network. It allows you to generate private/public keys, derive Bitcoin addresses from them, create transactions, deserialize existing ones, sign transactions and messages, and build multi-signature and multisig addresses using Bitcoin Script. Its most important difference from most similar libraries is support for SegWit (bech32-encoded) addresses and transactions that spend from them. You can get an address using PublicKey.get_address, or create one from your own hash.
**Examples:**
``` python
>>> from btclib import PrivateKey, Address, Transaction, Output, get_inputs
>>>
>>> my_wif = 'cMtnJjkY8hBrNdNN1kPBCMuTM5h4rxes9nrfRfktTn8tW6HW2pC2'
>>> my_pv = PrivateKey(my_wif)
>>> my_pub = my_pv.pub
>>>
>>> my_pub.get_address('P2PKH').string
'12Nj1W9U7xvzbRFsMErK8hsm7pYGZv9jsT'
>>>
>>> my_pub.get_address('P2SH-P2WPKH').string
'39YgiFhV8U5rWiUQLh5sDeGJvaft81k1sV'
>>>
>>> my_pub.get_address('P2WPKH').string
'bc1qpuf7m9ysjtnxhpfvx80v6lptsk33lm2x3t9s5w'
>>>
>>> my_pub.get_address('P2WSH').string
'bc1qxmh2drh6xsqyr5m4c8f72fwmqskmgk0rdqtggn6leswzf6m4kxvqhehfwy'
>>>
>>>
>>> addr = my_pub.get_address('P2WSH', 'testnet')
>>> addr.string
'tb1qxmh2drh6xsqyr5m4c8f72fwmqskmgk0rdqtggn6leswzf6m4kxvqq3px5t'
>>>
>>> # To see address info:
>>> addr.get_info() # request to one of the blockchain APIs
AddressINF(received=100000, sent=0, tx_count=1, balance=100000)
>>>
>>> # To get unspents (UTXO):
>>> addr.get_unspents()
[Unspent(tx_id='748131e63a0c27a407316cdafb7cad20ec0994c856862d63e50e706073bc7f00', out_index=0, amount=100000)]
>>>
>>> # To get inputs (itself converts unspent to input)
>>> inps = get_inputs([my_pv, addr]) # Input contains a private key to be able to sign yourself in a transaction
>>>
>>> out = Output(Address('tb1q7n075vj7tz4jm28zky7dzknuxujzl5vt6pxkz4'), 90000)
>>>
>>> tx = Transaction(inps, [out])
>>> tx.default_sign_inputs() # will be used: inp.default_sign() for inp in tx.inputs
>>>
>>> # Notice: inp.default_sign (tx) will try to sign itself in the tx transaction (set the desired inp.script / inp.witness value),
>>> # if inp.address was obtained using PublicKey.get_address, it will succeed, but if the address hash was generated by your custom algorithm,
>>> # and the address object itself was obtained using address.<P2PKH/P2SH/P2WPKH/P2WSH>.from_hash, maybe the signature algorithm will differ
>>> # from the algorithm in inp.default_sign, for this use inp.custom_sign(script=Script(...), witness=Script(...)).
>>> # To summarize: if the address was obtained with PublicKey.get_address(<some_type>, <some_network>), Input.default_sign will be able to sign it.
>>> # else, use inp.custom_sign with custom scripts.
>>>
>>> tx.serialize()
'02000000000101007fbc7360700ee5632d8656c89409ec20ad7cfbda6c3107a4270c3ae63181740000000000ffffffff01905f010000000000160014f4dfea325e58ab2da8e2b13cd...'
>>> tx.get_id()
'fff79b6d9f6a4068d5b8298c522177e9783af70d61653d628314e155a1e0e94e'
>>> tx.push()
True
```
This transaction - https://www.blockchain.com/ru/btc-testnet/tx/fff79b6d9f6a4068d5b8298c522177e9783af70d61653d628314e155a1e0e94e.
Installation
------------
btc-lib is distributed on `PyPI`, is available on Linux/macOS
and Windows, and supports Python 3.10+.
```bash
$ python3 -m pip install btc-lib
```
|
PypiClean
|
/intel_tensorflow-2.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/tensorflow/python/data/experimental/ops/io.py
|
"""Python API for save and loading a dataset."""
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
COMPRESSION_GZIP = "GZIP"
COMPRESSION_SNAPPY = "NONE"
DATASET_SPEC_FILENAME = "dataset_spec.pb"
@tf_export("data.experimental.save", v1=[])
@deprecation.deprecated(None, "Use `tf.data.Dataset.save(...)` instead.")
def save(dataset,
path,
compression=None,
shard_func=None,
checkpoint_args=None):
"""Saves the content of the given dataset.
Example usage:
>>> import tempfile
>>> path = os.path.join(tempfile.gettempdir(), "saved_data")
>>> # Save a dataset
>>> dataset = tf.data.Dataset.range(2)
>>> tf.data.experimental.save(dataset, path)
>>> new_dataset = tf.data.experimental.load(path)
>>> for elem in new_dataset:
... print(elem)
tf.Tensor(0, shape=(), dtype=int64)
tf.Tensor(1, shape=(), dtype=int64)
The saved dataset is saved in multiple file "shards". By default, the dataset
output is divided to shards in a round-robin fashion but custom sharding can
be specified via the `shard_func` function. For example, you can save the
dataset to using a single shard as follows:
```python
dataset = make_dataset()
def custom_shard_func(element):
return np.int64(0)
dataset = tf.data.experimental.save(
path="/path/to/data", ..., shard_func=custom_shard_func)
```
To enable checkpointing, pass in `checkpoint_args` to the `save` method
as follows:
```python
dataset = tf.data.Dataset.range(100)
save_dir = "..."
checkpoint_prefix = "..."
step_counter = tf.Variable(0, trainable=False)
checkpoint_args = {
"checkpoint_interval": 50,
"step_counter": step_counter,
"directory": checkpoint_prefix,
"max_to_keep": 20,
}
dataset.save(dataset, save_dir, checkpoint_args=checkpoint_args)
```
NOTE: The directory layout and file format used for saving the dataset is
considered an implementation detail and may change. For this reason, datasets
saved through `tf.data.experimental.save` should only be consumed through
`tf.data.experimental.load`, which is guaranteed to be backwards compatible.
Args:
dataset: The dataset to save.
path: Required. A directory to use for saving the dataset.
compression: Optional. The algorithm to use to compress data when writing
it. Supported options are `GZIP` and `NONE`. Defaults to `NONE`.
shard_func: Optional. A function to control the mapping of dataset elements
to file shards. The function is expected to map elements of the input
dataset to int64 shard IDs. If present, the function will be traced and
executed as graph computation.
checkpoint_args: Optional args for checkpointing which will be passed into
the `tf.train.CheckpointManager`. If `checkpoint_args` are not specified,
then checkpointing will not be performed. The `save()` implementation
creates a `tf.train.Checkpoint` object internally, so users should not
set the `checkpoint` argument in `checkpoint_args`.
Returns:
An operation which when executed performs the save. When writing
checkpoints, returns None. The return value is useful in unit tests.
Raises:
ValueError if `checkpoint` is passed into `checkpoint_args`.
"""
return dataset.save(path, compression, shard_func, checkpoint_args)
@tf_export("data.experimental.load", v1=[])
@deprecation.deprecated(None, "Use `tf.data.Dataset.load(...)` instead.")
def load(path, element_spec=None, compression=None, reader_func=None):
"""Loads a previously saved dataset.
Example usage:
>>> import tempfile
>>> path = os.path.join(tempfile.gettempdir(), "saved_data")
>>> # Save a dataset
>>> dataset = tf.data.Dataset.range(2)
>>> tf.data.experimental.save(dataset, path)
>>> new_dataset = tf.data.experimental.load(path)
>>> for elem in new_dataset:
... print(elem)
tf.Tensor(0, shape=(), dtype=int64)
tf.Tensor(1, shape=(), dtype=int64)
If the default option of sharding the saved dataset was used, the element
order of the saved dataset will be preserved when loading it.
The `reader_func` argument can be used to specify a custom order in which
elements should be loaded from the individual shards. The `reader_func` is
expected to take a single argument -- a dataset of datasets, each containing
elements of one of the shards -- and return a dataset of elements. For
example, the order of shards can be shuffled when loading them as follows:
```python
def custom_reader_func(datasets):
datasets = datasets.shuffle(NUM_SHARDS)
return datasets.interleave(lambda x: x, num_parallel_calls=AUTOTUNE)
dataset = tf.data.experimental.load(
path="/path/to/data", ..., reader_func=custom_reader_func)
```
Args:
path: Required. A path pointing to a previously saved dataset.
element_spec: Optional. A nested structure of `tf.TypeSpec` objects matching
the structure of an element of the saved dataset and specifying the type
of individual element components. If not provided, the nested structure of
`tf.TypeSpec` saved with the saved dataset is used. Note that this
argument is required in graph mode.
compression: Optional. The algorithm to use to decompress the data when
reading it. Supported options are `GZIP` and `NONE`. Defaults to `NONE`.
reader_func: Optional. A function to control how to read data from shards.
If present, the function will be traced and executed as graph computation.
Returns:
A `tf.data.Dataset` instance.
Raises:
FileNotFoundError: If `element_spec` is not specified and the saved nested
structure of `tf.TypeSpec` can not be located with the saved dataset.
ValueError: If `element_spec` is not specified and the method is executed
in graph mode.
"""
return dataset_ops.Dataset.load(path, element_spec, compression, reader_func)
|
PypiClean
|
/crmCustomer-1.0.1-py3-none-any.whl/crmSaleOrder/operation.py
|
def getCode(app3):
'''
Query the sales order codes that have not yet been processed from the table.
:param app3:
:return:
'''
sql = "select FSALEORDERNO from RDS_CRM_SRC_sales_order where FIsDo=0 and FIsfree!=1 "
res = app3.select(sql)
return res
def getClassfyData(app3, code):
'''
获得分类数据
:param app2:
:param code:
:return:
'''
sql = f"select FInterID,FSALEORDERNO,FBILLTYPEIDNAME,FSALEDATE,FCUSTCODE,FCUSTOMNAME,FSALEORDERENTRYSEQ,FPRDNUMBER,FPRDNAME,FQTY,FPRICE,FMONEY,FTAXRATE,FTAXAMOUNT,FTAXPRICE,FALLAMOUNTFOR,FSALDEPT,FSALGROUP,FSALER,FDESCRIPTION,UPDATETIME,FIsfree,FIsDO,FPurchaseDate,FCollectionTerms,FUrgency,FSalesType,FCurrencyName from RDS_CRM_SRC_sales_order where FSALEORDERNO='{code}'"
res = app3.select(sql)
return res
def code_conversion(app2, tableName, param, param2):
'''
Look up the internal system code by the ECS material code.
:param app2: database operation object
:param tableName: table name
:param param: parameter 1 (column to filter on)
:param param2: parameter 2 (value to match)
:return:
'''
sql = f"select FNumber from {tableName} where {param}='{param2}'"
res = app2.select(sql)
if res == []:
return ""
else:
return res[0]['FNumber']
def code_conversion_org(app2, tableName, param, param2, param3):
'''
Look up the internal system code by the ECS material code, within a given organization.
:param app2: database operation object
:param tableName: table name
:param param: parameter 1 (column to filter on)
:param param2: parameter 2 (value to match)
:param param3: organization number (FOrgNumber)
:return:
'''
sql = f"select FNumber from {tableName} where {param}='{param2}' and FOrgNumber='{param3}'"
res = app2.select(sql)
if res == []:
return ""
else:
return res[0]['FNumber']
def changeStatus(app2, fnumber, status):
'''
将没有写入的数据状态改为2
:param app2: 执行sql语句对象
:param fnumber: 订单编码
:param status: 数据状态
:return:
'''
sql = f"update a set a.FIsDO={status} from RDS_CRM_SRC_sales_order a where FSALEORDERNO='{fnumber}'"
app2.update(sql)
def getFinterId(app2, tableName):
'''
Return the largest FInterId currently used in the given table (0 if it is empty)
:param app2: object used to execute SQL statements
:param tableName: name of the table to query
:return:
'''
sql = f"select isnull(max(FInterId),0) as FMaxId from {tableName}"
res = app2.select(sql)
return res[0]['FMaxId']
def checkDataExist(app2, FSEQ):
'''
Use the FSEQ field to check whether the row already exists in the table
:param app2: database access object
:param FSEQ: FSALEORDERENTRYSEQ value to look for
:return: True if the row does not exist yet, otherwise False
'''
sql = f"select FSALEORDERENTRYSEQ from RDS_CRM_SRC_Sales_Order where FSALEORDERENTRYSEQ={FSEQ}"
res = app2.select(sql)
if res == []:
return True
else:
return False
def insert_SAL_ORDER_Table(app2, data):
'''
Insert the data into the sales-order SRC table
:param app2: database access object
:param data: source data (a pandas DataFrame)
:return:
'''
for i in data.index:
if checkDataExist(app2, data.loc[i]['FSALEORDERENTRYSEQ']):
sql = f"insert into RDS_CRM_SRC_Sales_Order(FInterID,FSALEORDERNO,FBILLTYPEIDNAME,FSALEDATE,FCUSTCODE,FCUSTOMNAME,FSALEORDERENTRYSEQ,FPRDNUMBER,FPRDNAME,FQTY,FPRICE,FMONEY,FTAXRATE,FTAXAMOUNT,FTAXPRICE,FALLAMOUNTFOR,FSALDEPT,FSALGROUP,FSALER,FDESCRIPTION,FIsfree,FIsDO,FCollectionTerms,FUrgency,FSalesType,FUpDateTime,FCurrencyName) values({getFinterId(app2, 'RDS_CRM_SRC_Sales_Order') + 1},'{data.loc[i]['FSALEORDERNO']}','{data.loc[i]['FBILLTYPEIDNAME']}','{data.loc[i]['FSALEDATE']}','{data.loc[i]['FCUSTCODE']}','{data.loc[i]['FCUSTOMNAME']}','{data.loc[i]['FSALEORDERENTRYSEQ']}','{data.loc[i]['FPRDNUMBER']}','{data.loc[i]['FPRDNAME']}','{data.loc[i]['FQTY']}','{data.loc[i]['FPRICE']}','{data.loc[i]['FMONEY']}','{data.loc[i]['FTAXRATE']}','{data.loc[i]['FTAXAMOUNT']}','{data.loc[i]['FTAXPRICE']}','{data.loc[i]['FAMOUNT']}','{data.loc[i]['FSALDEPTID']}','{data.loc[i]['FSALGROUPID']}','{data.loc[i]['FSALERID']}','{data.loc[i]['FDESCRIPTION']}','0','0','月结30天','一般','内销',getdate(),'{data.loc[i]['FCURRENCYID']}')"
app2.insert(sql)
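
# --- Illustrative usage sketch (not part of the original module) ---------------
# A minimal driver, assuming `app3` is the same database helper used by the
# functions above; it walks the unprocessed order codes, fetches their detail
# rows, and marks each order as done. The status value 1 is an assumption.
def _example_process_orders(app3):  # pragma: no cover - illustration only
    for row in getCode(app3):
        code = row['FSALEORDERNO']
        details = getClassfyData(app3, code)
        # ... push `details` to the target system here ...
        changeStatus(app3, code, 1)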
|
PypiClean
|
/fusspy-2.2.1.tar.gz/fusspy-2.2.1/astropy_helpers/astropy_helpers/sphinx/ext/autodoc_enhancements.py
|
import inspect
import sys
import types
from sphinx.ext.autodoc import AttributeDocumenter, ModuleDocumenter
from sphinx.util.inspect import isdescriptor
if sys.version_info[0] == 3:
class_types = (type,)
else:
class_types = (type, types.ClassType)
MethodDescriptorType = type(type.__subclasses__)
# See
# https://github.com/astropy/astropy-helpers/issues/116#issuecomment-71254836
# for further background on this.
def type_object_attrgetter(obj, attr, *defargs):
"""
This implements an improved attrgetter for type objects (i.e. classes)
that can handle class attributes that are implemented as properties on
a metaclass.
Normally `getattr` on a class with a `property` (say, "foo"), would return
the `property` object itself. However, if the class has a metaclass which
*also* defines a `property` named "foo", ``getattr(cls, 'foo')`` will find
the "foo" property on the metaclass and resolve it. For the purposes of
autodoc we just want to document the "foo" property defined on the class,
not on the metaclass.
For example::
>>> class Meta(type):
... @property
... def foo(cls):
... return 'foo'
...
>>> class MyClass(metaclass=Meta):
... @property
... def foo(self):
... \"\"\"Docstring for MyClass.foo property.\"\"\"
... return 'myfoo'
...
>>> getattr(MyClass, 'foo')
'foo'
>>> type_object_attrgetter(MyClass, 'foo')
<property at 0x...>
>>> type_object_attrgetter(MyClass, 'foo').__doc__
'Docstring for MyClass.foo property.'
The last line of the example shows the desired behavior for the purposes
of autodoc.
"""
for base in obj.__mro__:
if attr in base.__dict__:
if isinstance(base.__dict__[attr], property):
# Note, this should only be used for properties--for any other
# type of descriptor (classmethod, for example) this can mess
# up existing expectations of what getattr(cls, ...) returns
return base.__dict__[attr]
break
return getattr(obj, attr, *defargs)
# Provided to work around a bug in Sphinx
# See https://github.com/sphinx-doc/sphinx/pull/1843
class AttributeDocumenter(AttributeDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
non_attr_types = cls.method_types + class_types + \
(MethodDescriptorType,)
isdatadesc = isdescriptor(member) and not \
isinstance(member, non_attr_types) and not \
type(member).__name__ == "instancemethod"
# That last condition addresses an obscure case of C-defined
# methods using a deprecated type in Python 3, that is not otherwise
# exported anywhere by Python
return isdatadesc or (not isinstance(parent, ModuleDocumenter) and
not inspect.isroutine(member) and
not isinstance(member, class_types))
def setup(app):
# Must have the autodoc extension set up first so we can override it
app.setup_extension('sphinx.ext.autodoc')
# Need to import this too since it re-registers all the documenter types
# =_=
import sphinx.ext.autosummary.generate
app.add_autodoc_attrgetter(type, type_object_attrgetter)
if sphinx.version_info < (1,4,2):
# this is a really ugly hack to suppress a warning that sphinx 1.4
# generates when overriding an existing directive (which is *desired*
# behavior here). As of sphinx v1.4.2, this has been fixed:
# https://github.com/sphinx-doc/sphinx/issues/2451
# But we leave it in for 1.4.0/1.4.1. If the "needs_sphinx" is
# eventually updated to >= 1.4.2, this should be removed entirely (in
# favor of the line in the "else" clause)
_oldwarn = app._warning
_oldwarncount = app._warncount
try:
try:
# *this* is in a try/finally because we don't want to force six as
# a real dependency. In sphinx 1.4, six is a prerequisite, so
# there's no issue. But in older sphinxes this may not be true...
# but the underlying warning is absent anyway so we let it slide.
from six import StringIO
app._warning = StringIO()
except ImportError:
pass
app.add_autodocumenter(AttributeDocumenter)
finally:
app._warning = _oldwarn
app._warncount = _oldwarncount
else:
app.add_autodocumenter(AttributeDocumenter)
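
# --- Illustrative usage sketch (not part of the original module) ---------------
# Enabling this extension in a project's Sphinx ``conf.py`` would look roughly
# like the following; the module path is inferred from this file's location and
# may differ in a vendored copy.
#
#     extensions = [
#         'sphinx.ext.autodoc',
#         'astropy_helpers.sphinx.ext.autodoc_enhancements',
#     ]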
|
PypiClean
|
/salt-ssh-9000.tar.gz/salt-ssh-9000/salt/modules/win_servermanager.py
|
import logging
import salt.utils.json
import salt.utils.platform
import salt.utils.powershell
import salt.utils.versions
from salt.exceptions import CommandExecutionError
try:
from shlex import quote as _cmd_quote # pylint: disable=E0611
except ImportError:
from pipes import quote as _cmd_quote
log = logging.getLogger(__name__)
__virtualname__ = "win_servermanager"
def __virtual__():
"""
Load only on windows with servermanager module
"""
if not salt.utils.platform.is_windows():
return (
False,
"Module win_servermanager: module only works on Windows systems.",
)
if salt.utils.versions.version_cmp(__grains__["osversion"], "6.1.7600") == -1:
return (
False,
"Failed to load win_servermanager module: "
"Requires Remote Server Administration Tools which "
"is only available on Windows 2008 R2 and later.",
)
if not salt.utils.powershell.module_exists("ServerManager"):
return (
False,
"Failed to load win_servermanager module: "
"ServerManager module not available. "
"May need to install Remote Server Administration Tools.",
)
return __virtualname__
def _pshell_json(cmd, cwd=None):
"""
Execute the desired powershell command and ensure that it returns data
in JSON format and load that into python
"""
cmd = "Import-Module ServerManager; {}".format(cmd)
if "convertto-json" not in cmd.lower():
cmd = "{} | ConvertTo-Json".format(cmd)
log.debug("PowerShell: %s", cmd)
ret = __salt__["cmd.run_all"](cmd, shell="powershell", cwd=cwd)
if "pid" in ret:
del ret["pid"]
if ret.get("stderr", ""):
error = ret["stderr"].splitlines()[0]
raise CommandExecutionError(error, info=ret)
if "retcode" not in ret or ret["retcode"] != 0:
# run_all logs an error to log.error, fail hard back to the user
raise CommandExecutionError(
"Issue executing PowerShell {}".format(cmd), info=ret
)
# Sometimes Powershell returns an empty string, which isn't valid JSON
if ret["stdout"] == "":
ret["stdout"] = "{}"
try:
ret = salt.utils.json.loads(ret["stdout"], strict=False)
except ValueError:
raise CommandExecutionError("No JSON results from PowerShell", info=ret)
return ret
def list_available():
"""
List available features to install
Returns:
str: A list of available features as returned by the
``Get-WindowsFeature`` PowerShell command
CLI Example:
.. code-block:: bash
salt '*' win_servermanager.list_available
"""
cmd = (
"Import-Module ServerManager; "
"Get-WindowsFeature "
"-ErrorAction SilentlyContinue "
"-WarningAction SilentlyContinue"
)
return __salt__["cmd.shell"](cmd, shell="powershell")
def list_installed():
"""
List installed features. Supported on Windows Server 2008 and Windows 8 and
newer.
Returns:
dict: A dictionary of installed features
CLI Example:
.. code-block:: bash
salt '*' win_servermanager.list_installed
"""
cmd = (
"Get-WindowsFeature "
"-ErrorAction SilentlyContinue "
"-WarningAction SilentlyContinue "
"| Select DisplayName,Name,Installed"
)
features = _pshell_json(cmd)
ret = {}
for entry in features:
if entry["Installed"]:
ret[entry["Name"]] = entry["DisplayName"]
return ret
def install(feature, recurse=False, restart=False, source=None, exclude=None):
r"""
Install a feature
.. note::
Some features require a reboot after installation or uninstallation. If so,
other features cannot be installed until the server is restarted!
.. note::
Some features take a long time to install or uninstall, so set ``-t`` to
a long timeout.
Args:
feature (str, list):
The name of the feature(s) to install. This can be a single feature,
a string of features in a comma delimited list (no spaces), or a
list of features.
.. versionadded:: 2018.3.0
Added the ability to pass a list of features to be installed.
recurse (Optional[bool]):
Install all sub-features. Default is False
restart (Optional[bool]):
Restarts the computer when installation is complete, if required by
the role/feature installed. Will also trigger a reboot if an item
in ``exclude`` requires a reboot to be properly removed. Default is
False
source (Optional[str]):
Path to the source files if missing from the target system. None
means that the system will use windows update services to find the
required files. Default is None
exclude (Optional[str]):
The name of the feature to exclude when installing the named
feature. This can be a single feature, a string of features in a
comma-delimited list (no spaces), or a list of features.
.. warning::
As there is no exclude option for the ``Add-WindowsFeature``
or ``Install-WindowsFeature`` PowerShell commands the features
named in ``exclude`` will be installed with other sub-features
and will then be removed. **If the feature named in ``exclude``
is not a sub-feature of one of the installed items it will still
be removed.**
Returns:
dict: A dictionary containing the results of the install
CLI Example:
.. code-block:: bash
# Install the Telnet Client passing a single string
salt '*' win_servermanager.install Telnet-Client
# Install the TFTP Client and the SNMP Service passing a comma-delimited
# string. Install all sub-features
salt '*' win_servermanager.install TFTP-Client,SNMP-Service recurse=True
# Install the TFTP Client from d:\side-by-side
salt '*' win_servermanager.install TFTP-Client source=d:\\side-by-side
# Install the XPS Viewer, SNMP Service, and Remote Access passing a
# list. Install all sub-features, but exclude the Web Server
salt '*' win_servermanager.install "['XPS-Viewer', 'SNMP-Service', 'RemoteAccess']" True recurse=True exclude="Web-Server"
"""
# If it is a list of features, make it a comma delimited string
if isinstance(feature, list):
feature = ",".join(feature)
# Use Install-WindowsFeature on Windows 2012 (osversion 6.2) and later
# minions. Default to Add-WindowsFeature for earlier releases of Windows.
# The newer command makes management tools optional so add them for parity
# with old behavior.
command = "Add-WindowsFeature"
management_tools = ""
if salt.utils.versions.version_cmp(__grains__["osversion"], "6.2") >= 0:
command = "Install-WindowsFeature"
management_tools = "-IncludeManagementTools"
cmd = "{} -Name {} {} {} {} -WarningAction SilentlyContinue".format(
command,
_cmd_quote(feature),
management_tools,
"-IncludeAllSubFeature" if recurse else "",
"" if source is None else "-Source {}".format(source),
)
out = _pshell_json(cmd)
# Uninstall items in the exclude list
# The Install-WindowsFeature command doesn't have the concept of an exclude
# list. So you install first, then remove
if exclude is not None:
removed = remove(exclude)
# Results are stored in a list of dictionaries in `FeatureResult`
if out["FeatureResult"]:
ret = {
"ExitCode": out["ExitCode"],
"RestartNeeded": False,
"Restarted": False,
"Features": {},
"Success": out["Success"],
}
# FeatureResult is a list of dicts, so each item is a dict
for item in out["FeatureResult"]:
ret["Features"][item["Name"]] = {
"DisplayName": item["DisplayName"],
"Message": item["Message"],
"RestartNeeded": item["RestartNeeded"],
"SkipReason": item["SkipReason"],
"Success": item["Success"],
}
if item["RestartNeeded"]:
ret["RestartNeeded"] = True
# Only items that installed are in the list of dictionaries
# Add 'Already installed' for features that aren't in the list of dicts
for item in feature.split(","):
if item not in ret["Features"]:
ret["Features"][item] = {"Message": "Already installed"}
# Some items in the exclude list were removed after installation
# Show what was done, update the dict
if exclude is not None:
# Features is a dict, so it only iterates over the keys
for item in removed["Features"]:
if item in ret["Features"]:
ret["Features"][item] = {
"Message": "Removed after installation (exclude)",
"DisplayName": removed["Features"][item]["DisplayName"],
"RestartNeeded": removed["Features"][item]["RestartNeeded"],
"SkipReason": removed["Features"][item]["SkipReason"],
"Success": removed["Features"][item]["Success"],
}
# Exclude items might need a restart
if removed["Features"][item]["RestartNeeded"]:
ret["RestartNeeded"] = True
# Restart here if needed
if restart:
if ret["RestartNeeded"]:
if __salt__["system.reboot"](in_seconds=True):
ret["Restarted"] = True
return ret
else:
# If we get here then all features were already installed
ret = {
"ExitCode": out["ExitCode"],
"Features": {},
"RestartNeeded": False,
"Restarted": False,
"Success": out["Success"],
}
for item in feature.split(","):
ret["Features"][item] = {"Message": "Already installed"}
return ret
def remove(feature, remove_payload=False, restart=False):
r"""
Remove an installed feature
.. note::
Some features require a reboot after installation/uninstallation. If
one of these features are modified, then other features cannot be
installed until the server is restarted. Additionally, some features
take a while to complete installation/uninstallation, so it is a good
idea to use the ``-t`` option to set a longer timeout.
Args:
feature (str, list):
The name of the feature(s) to remove. This can be a single feature,
a string of features in a comma delimited list (no spaces), or a
list of features.
.. versionadded:: 2018.3.0
Added the ability to pass a list of features to be removed.
remove_payload (Optional[bool]):
True will cause the feature to be removed from the side-by-side
store (``%SystemDrive%:\Windows\WinSxS``). Default is False
restart (Optional[bool]):
Restarts the computer when uninstall is complete, if required by the
role/feature removed. Default is False
Returns:
dict: A dictionary containing the results of the uninstall
CLI Example:
.. code-block:: bash
salt -t 600 '*' win_servermanager.remove Telnet-Client
"""
# If it is a list of features, make it a comma delimited string
if isinstance(feature, list):
feature = ",".join(feature)
# Use Uninstall-WindowsFeature on Windows 2012 (osversion 6.2) and later
# minions. Default to Remove-WindowsFeature for earlier releases of Windows.
# The newer command makes management tools optional so add them for parity
# with old behavior.
command = "Remove-WindowsFeature"
management_tools = ""
_remove_payload = ""
if salt.utils.versions.version_cmp(__grains__["osversion"], "6.2") >= 0:
command = "Uninstall-WindowsFeature"
management_tools = "-IncludeManagementTools"
# Only available with the `Uninstall-WindowsFeature` command
if remove_payload:
_remove_payload = "-Remove"
cmd = "{} -Name {} {} {} {} -WarningAction SilentlyContinue".format(
command,
_cmd_quote(feature),
management_tools,
_remove_payload,
"-Restart" if restart else "",
)
try:
out = _pshell_json(cmd)
except CommandExecutionError as exc:
if "ArgumentNotValid" in exc.message:
raise CommandExecutionError("Invalid Feature Name", info=exc.info)
raise
# Results are stored in a list of dictionaries in `FeatureResult`
if out["FeatureResult"]:
ret = {
"ExitCode": out["ExitCode"],
"RestartNeeded": False,
"Restarted": False,
"Features": {},
"Success": out["Success"],
}
for item in out["FeatureResult"]:
ret["Features"][item["Name"]] = {
"DisplayName": item["DisplayName"],
"Message": item["Message"],
"RestartNeeded": item["RestartNeeded"],
"SkipReason": item["SkipReason"],
"Success": item["Success"],
}
# Only items that installed are in the list of dictionaries
# Add 'Not installed' for features that aren't in the list of dicts
for item in feature.split(","):
if item not in ret["Features"]:
ret["Features"][item] = {"Message": "Not installed"}
return ret
else:
# If we get here then none of the features were installed
ret = {
"ExitCode": out["ExitCode"],
"Features": {},
"RestartNeeded": False,
"Restarted": False,
"Success": out["Success"],
}
for item in feature.split(","):
ret["Features"][item] = {"Message": "Not installed"}
return ret
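
# --- Illustrative usage sketch (not part of the original module) ---------------
# A minimal sketch, assuming a running salt master and a Windows minion named
# 'winminion', showing how these functions could be called from Python via
# LocalClient instead of the CLI examples in the docstrings above.
#
#     import salt.client
#
#     client = salt.client.LocalClient()
#     client.cmd('winminion', 'win_servermanager.install',
#                ['Telnet-Client'], kwarg={'recurse': True})
#     client.cmd('winminion', 'win_servermanager.remove', ['Telnet-Client'])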
|
PypiClean
|
/sidp-CanvasSync-0.2.4.tar.gz/sidp-CanvasSync-0.2.4/CanvasSync/entities/folder.py
|
from __future__ import print_function
# Third party
from six import text_type
# CanvasSync modules
from CanvasSync.utilities.ANSI import ANSI
from CanvasSync.entities.canvas_entity import CanvasEntity
from CanvasSync.entities.file import File
from CanvasSync.utilities import helpers
class Folder(CanvasEntity):
def __init__(self, folder_info, parent, black_list=False):
"""
Constructor method, initializes the base CanvasEntity class and adds all child Folder and/or File objects to
the list of children
folder_info : dict | A dictionary of information on the Canvas Folder object
parent : object | The parent object, a Folder or Course object
"""
self.folder_info = folder_info
folder_id = self.folder_info[u"id"]
folder_name = helpers.get_corrected_name(self.folder_info[u"name"])
folder_path = parent.get_path() + folder_name
# Initialize base CanvasEntity class
CanvasEntity.__init__(self,
id_number=folder_id,
name=folder_name,
sync_path=folder_path,
parent=parent,
identifier=u"folder")
self.black_list = black_list
def __repr__(self):
""" String representation, overwriting base class method """
status = ANSI.format(u"[SYNCED]", formatting=u"green")
return status + u" " * 7 + u"| " + u"\t" * self.indent + u"%s: %s" \
% (ANSI.format(u"Folder", formatting=u"folder"),
self.name)
def initialize_black_list(self):
"""
Some files may have been added to Module or Assignment objects already, so we do not need to store them again
This method initializes a list of all file names that exist in the hierarchy of the Course object so far
"""
# Get all entities listed in the Synchronizer object under the course.
entities = self.get_synchronizer().get_entities(self.get_course().get_id())
# Get list of names of all the File objects of the entities list
black_list = [x.get_id() for x in entities if x.get_identifier_string() == u"file"]
return black_list
def add_files(self):
""" Add all files stored by this folder to the list of children """
files = self.api.get_files_in_folder(self.id)
for file in files:
# Skip duplicates if this setting is active
# (otherwise the list will be empty)
if file[u"id"] in self.black_list:
continue
file = File(file, self, add_to_list_of_entities=False)
self.add_child(file)
def add_sub_folders(self):
""" Add all sub-folders stored by this folder to the list of children """
folders = self.api.get_folders_in_folder(self.id)
for folder in folders:
if folder[u"name"] == u"course_image":
# Do we really need that course image?
continue
folder = Folder(folder, self, black_list=self.black_list)
self.add_child(folder)
def walk(self, counter):
"""
Walk by adding all Files and Folder objects to the list of children
"""
print(text_type(self))
# If avoid duplicated setting is active, initialize black list of files found in Modules and
# Assignments if it was not passed to the object at initialization.
if not self.black_list and self.settings.avoid_duplicates:
self.black_list = self.initialize_black_list()
elif not self.settings.avoid_duplicates:
self.black_list = []
self.add_files()
self.add_sub_folders()
counter[0] += 1
for item in self:
item.walk(counter)
def sync(self):
"""
1) Adding all Files and Folder objects to the list of children
2) Synchronize all children objects
"""
print(text_type(self))
# If avoid duplicated setting is active, initialize black list of files found in Modules and
# Assignments if it was not passed to the object at initialization.
if not self.black_list and self.settings.avoid_duplicates:
self.black_list = self.initialize_black_list()
elif not self.settings.avoid_duplicates:
self.black_list = []
self.add_files()
self.add_sub_folders()
for item in self:
item.sync()
def show(self):
pass
|
PypiClean
|
/weconnect-cupra-daern-0.50.4.tar.gz/weconnect-cupra-daern-0.50.4/weconnect_cupra/api/cupra/elements/controls.py
|
import logging
import json
import requests
from weconnect_cupra.addressable import AddressableObject, ChangeableAttribute
from weconnect_cupra.elements.control_operation import ControlOperation
from weconnect_cupra.api.cupra.elements.charging_settings import ChargingSettings
from weconnect_cupra.api.cupra.elements.climatization_settings import ClimatizationSettings
from weconnect_cupra.elements.error import Error
from weconnect_cupra.errors import ControlError, SetterError
from weconnect_cupra.util import celsiusToKelvin, farenheitToKelvin
from weconnect_cupra.api.vw.domain import Domain
LOG = logging.getLogger("weconnect_cupra")
class Controls(AddressableObject):
def __init__(
self,
localAddress,
vehicle,
parent,
):
self.vehicle = vehicle
super().__init__(localAddress=localAddress, parent=parent)
self.update()
# Public API properties
self.climatizationControl = None
self.chargingControl = None
self.wakeupControl = ChangeableAttribute(
localAddress='wakeup', parent=self, value=ControlOperation.NONE, valueType=ControlOperation, valueSetter=self.__setWakeupControlChange)
def update(self):
for domain in self.vehicle.domains.values():
for status in domain.values():
if isinstance(status, ClimatizationSettings):
if self.climatizationControl is None:
self.climatizationControl = ChangeableAttribute(
localAddress='climatisation', parent=self, value=ControlOperation.NONE, valueType=(ControlOperation, float),
valueSetter=self.__setClimatizationControlChange)
elif isinstance(status, ChargingSettings):
if self.chargingControl is None:
self.chargingControl = ChangeableAttribute(
localAddress='charging', parent=self, value=ControlOperation.NONE, valueType=ControlOperation,
valueSetter=self.__setChargingControlChange)
def __setClimatizationControlChange(self, value): # noqa: C901
if isinstance(value, ControlOperation):
if value not in [ControlOperation.START, ControlOperation.STOP]:
raise ControlError('Could not control climatisation, control operation %s cannot be executed', value)
control = value
temperature = None
elif isinstance(value, (int, float)):
control = ControlOperation.START
temperature = float(value)
else:
raise ControlError('Could not control climatisation, control argument %s cannot be understood', value)
if control == ControlOperation.START:
# Build up settings dict
settingsDict = dict()
if control == ControlOperation.START:
if 'climatisation' not in self.vehicle.domains or 'climatisationSettings' not in self.vehicle.domains['climatisation']:
raise ControlError('Could not control climatisation, there are no climatisationSettings for the vehicle available.')
climatizationSettings = self.vehicle.domains['climatisation']['climatisationSettings']
for child in climatizationSettings.getLeafChildren():
if isinstance(child, ChangeableAttribute):
settingsDict[child.getLocalAddress()] = child.value
if temperature is not None:
if 'targetTemperature_C' in settingsDict:
settingsDict['targetTemperature_C'] = temperature
settingsDict['targetTemperature_K'] = celsiusToKelvin(temperature)
elif 'targetTemperature_K' not in settingsDict:
if 'targetTemperature_C' in settingsDict:
settingsDict['targetTemperature_K'] = celsiusToKelvin(settingsDict['targetTemperature_C'])
elif 'targetTemperature_F' in settingsDict:
settingsDict['targetTemperature_K'] = farenheitToKelvin(settingsDict['targetTemperature_F'])
else:
settingsDict['targetTemperature_K'] = celsiusToKelvin(20.5)
# Do API request to set temperature
data = json.dumps(settingsDict)
controlResponse = self.vehicle.fetcher.put(
url=f'https://ola.prod.code.seat.cloud.vwgroup.com/v1/vehicles/{self.vehicle.vin.value}/climatisation/requests/settings',
data=data,
allow_redirects=True,
headers={
"accept": '*/*',
"user-agent": "CUPRAApp%20-%20Store/20220207 CFNetwork/1240.0.4 Darwin/20.6.0",
"Content-Type": "application/json",
"accept-language": "de-de",
"Accept-Encoding": "gzip, deflate"
} )
if controlResponse.status_code != requests.codes['ok']:
errorDict = controlResponse.json()
if errorDict is not None and 'error' in errorDict:
error = Error(localAddress='error', parent=self, fromDict=errorDict['error'])
if error is not None:
message = ''
if error.message.enabled and error.message.value is not None:
message += error.message.value
if error.info.enabled and error.info.value is not None:
message += ' - ' + error.info.value
if error.retry.enabled and error.retry.value is not None:
if error.retry.value:
message += ' - Please retry in a moment'
else:
message += ' - No retry possible'
raise SetterError(f'Could not control climatisation ({message})')
else:
raise SetterError(f'Could not control climatisation ({controlResponse.status_code})')
raise SetterError(f'Could not control climatisation ({controlResponse.status_code})')
# Do API request to set run state
controlResponse = self.vehicle.fetcher.post(
url=f'https://ola.prod.code.seat.cloud.vwgroup.com/vehicles/{self.vehicle.vin.value}/climatisation/requests/{control.value}',
allow_redirects=True,
headers={
"accept": '*/*',
"user-agent": "CUPRAApp%20-%20Store/20220207 CFNetwork/1240.0.4 Darwin/20.6.0",
"Content-Type": "application/json",
"accept-language": "de-de",
"Accept-Encoding": "gzip, deflate"
} )
if controlResponse.status_code != requests.codes['ok']:
errorDict = controlResponse.json()
if errorDict is not None and 'error' in errorDict:
error = Error(localAddress='error', parent=self, fromDict=errorDict['error'])
if error is not None:
message = ''
if error.message.enabled and error.message.value is not None:
message += error.message.value
if error.info.enabled and error.info.value is not None:
message += ' - ' + error.info.value
if error.retry.enabled and error.retry.value is not None:
if error.retry.value:
message += ' - Please retry in a moment'
else:
message += ' - No retry possible'
raise SetterError(f'Could not control climatisation ({message})')
else:
raise SetterError(f'Could not control climatisation ({controlResponse.status_code})')
raise SetterError(f'Could not control climatisation ({controlResponse.status_code})')
# Build up response
responseDict = controlResponse.json()
if 'data' in responseDict and 'requestID' in responseDict['data']:
if self.vehicle.requestTracker is not None:
self.vehicle.requestTracker.trackRequest(responseDict['data']['requestID'], Domain.CLIMATISATION, 20, 120)
def __setChargingControlChange(self, value): # noqa: C901
# Validate inputs
if value not in [ControlOperation.START, ControlOperation.STOP]:
return
# Do API request
controlResponse = self.vehicle.fetcher.post(
url=f'https://ola.prod.code.seat.cloud.vwgroup.com/vehicles/{self.vehicle.vin.value}/charging/requests/{value.value}',
data='{}',
allow_redirects=True,
headers={
"accept": '*/*',
"user-agent": "CUPRAApp%20-%20Store/20220207 CFNetwork/1240.0.4 Darwin/20.6.0",
"Content-Type": "application/json",
"accept-language": "de-de",
"Accept-Encoding": "gzip, deflate"
} )
# Handle response
if controlResponse.status_code != requests.codes['ok']:
errorDict = controlResponse.json()
if errorDict is not None and 'error' in errorDict:
error = Error(localAddress='error', parent=self, fromDict=errorDict['error'])
if error is not None:
message = ''
if error.message.enabled and error.message.value is not None:
message += error.message.value
if error.info.enabled and error.info.value is not None:
message += ' - ' + error.info.value
if error.retry.enabled and error.retry.value is not None:
if error.retry.value:
message += ' - Please retry in a moment'
else:
message += ' - No retry possible'
raise SetterError(f'Could not control charging ({message})')
else:
raise SetterError(f'Could not control charging ({controlResponse.status_code})')
raise SetterError(f'Could not control charging ({controlResponse.status_code})')
# Maybe send response to request tracker
responseDict = controlResponse.json()
if 'data' in responseDict and 'requestID' in responseDict['data']:
if self.vehicle.requestTracker is not None:
self.vehicle.requestTracker.trackRequest(responseDict['data']['requestID'], Domain.CHARGING, 20, 120)
def __setWakeupControlChange(self, value): # noqa: C901
if value in [ControlOperation.START]:
url = f'https://mobileapi.apps.emea.vwapps.io/vehicles/{self.vehicle.vin.value}/vehiclewakeuptrigger'
controlResponse = self.vehicle.weConnect.session.post(url, data='{}', allow_redirects=True)
if controlResponse.status_code not in (requests.codes['ok'], requests.codes['no_content']):
errorDict = controlResponse.json()
if errorDict is not None and 'error' in errorDict:
error = Error(localAddress='error', parent=self, fromDict=errorDict['error'])
if error is not None:
message = ''
if error.message.enabled and error.message.value is not None:
message += error.message.value
if error.info.enabled and error.info.value is not None:
message += ' - ' + error.info.value
if error.retry.enabled and error.retry.value is not None:
if error.retry.value:
message += ' - Please retry in a moment'
else:
message += ' - No retry possible'
raise SetterError(f'Could not control wakeup ({message})')
else:
raise SetterError(f'Could not control wakeup ({controlResponse.status_code})')
raise SetterError(f'Could not control wakeup ({controlResponse.status_code})')
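
# --- Illustrative usage sketch (not part of the original module) ---------------
# A minimal sketch, assuming a connected `vehicle` whose ``controls`` attribute is
# an instance of the class above, and assuming that assigning to ``value`` on a
# ChangeableAttribute invokes its valueSetter (as the wiring above suggests).
#
#     from weconnect_cupra.elements.control_operation import ControlOperation
#
#     vehicle.controls.chargingControl.value = ControlOperation.START  # start charging
#     vehicle.controls.climatizationControl.value = 21.5  # climatise to 21.5 degrees C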
|
PypiClean
|
/cmip6-downscaling-0.1.11.tar.gz/cmip6-downscaling-0.1.11/cmip6_downscaling/methods/deepsd/utils.py
|
import fsspec
import numpy as np
import xarray as xr
import xesmf as xe
EPSILON = 1e-6 # small value to add to the denominator when normalizing to avoid division by 0
INPUT_SIZE = 51 # number of pixels in a patch example used for training deepsd model (in both lat/lon (or x/y) directions)
PATCH_STRIDE = 20 # number of pixels to skip when generating patches for deepsd training
INFERENCE_BATCH_SIZE = 500 # number of timesteps in each inference iteration
starting_resolutions = {
'ERA5': 2.0,
'GISS-E2-1-G': 2.0,
'BCC-CSM2-MR': 1.0,
'AWI-CM-1-1-MR': 1.0,
'BCC-ESM1': 2.0,
'SAM0-UNICON': 1.0,
'CanESM5': 2.0,
'MRI-ESM2-0': 1.0,
'MPI-ESM-1-2-HAM': 2.0,
'MPI-ESM1-2-HR': 1.0,
'MPI-ESM1-2-LR': 2.0,
'NESM3': 2.0,
'NorESM2-LM': 2.0,
'FGOALS-g3': 2.0,
'MIROC6': 1.0,
'ACCESS-CM2': 1.0,
'NorESM2-MM': 1.0,
'ACCESS-ESM1-5': 1.0,
'AWI-ESM-1-1-LR': 2.0,
'TaiESM1': 1.0,
'NorCPM1': 2.0,
'CMCC-ESM2': 1.0,
}
stacked_model_path = 'az://cmip6downscaling/training/deepsd/deepsd_models/{var}_{starting_resolution}d_to_0_25d/frozen_graph.pb'
output_node_name = '{var}_0_25/prediction:0'
def res_to_str(r):
return str(np.round(r, 2)).replace('.', '_')
def bilinear_interpolate(ds: xr.Dataset, output_degree: float) -> xr.Dataset:
"""
Bilinearly interpolate a dataset onto a global grid with the specified step size
Parameters
----------
ds : xr.Dataset
Input dataset
output_degree : float
Step size for output dataset
Returns
-------
xr.Dataset
regridded dataset
"""
target_grid_ds = xe.util.grid_global(output_degree, output_degree, cf=True)
regridder = xe.Regridder(ds, target_grid_ds, "bilinear", extrap_method="nearest_s2d")
return regridder(ds, keep_attrs=True)
def conservative_interpolate(ds: xr.Dataset, output_degree: float) -> xr.Dataset:
"""
Conservatively interpolate a dataset onto a global grid with the specified spacing
Parameters
----------
ds : xr.Dataset
Input dataset
output_degree : float
Spacing for output dataset
Returns
-------
xr.Dataset
Regridded dataset
"""
target_grid_ds = xe.util.grid_global(output_degree, output_degree, cf=True)
# conservative area regridding needs cell boundary information (lat/lon bounds)
regridder = xe.Regridder(ds, target_grid_ds, "conservative")
return regridder(ds, keep_attrs=True)
def normalize(
ds: xr.Dataset, dims: list[str] = ['lat', 'lon'], epsilon: float = 1e-6
) -> xr.Dataset:
"""
Normalize dataset
Parameters
----------
ds : xr.Dataset
Input dataset
dims : list
Dimensions over which to apply mean and standard deviation
epsilon : float
Value to add to standard deviation during normalization
Returns
-------
xr.Dataset
Normalized dataset
"""
mean = ds.mean(dim=dims).compute()
std = ds.std(dim=dims).compute()
norm = (ds - mean) / (std + epsilon)
return norm
def build_grid_spec(
output_degree,
):
output_degree = np.round(output_degree, 2)
gcm_grid = xe.util.grid_global(output_degree, output_degree, cf=True)
nlat = len(gcm_grid.lat)
nlon = len(gcm_grid.lon)
lat_spacing = int(np.round(abs(gcm_grid.lat[0] - gcm_grid.lat[1]), 1) * 10)
lon_spacing = int(np.round(abs(gcm_grid.lon[0] - gcm_grid.lon[1]), 1) * 10)
min_lat = int(np.round(gcm_grid.lat.min(), 1))
min_lon = int(np.round(gcm_grid.lon.min(), 1))
grid_spec = f'{nlat:d}x{nlon:d}_gridsize_{lat_spacing:d}_{lon_spacing:d}_llcorner_{min_lat:d}_{min_lon:d}'
return grid_spec
def make_coarse_elev_path(
output_degree,
):
grid_spec = build_grid_spec(output_degree)
return f'az://scratch/deepsd/intermediate/elev/ERA5_full_space_{grid_spec}.zarr'
def get_elevation_data(output_degree):
elev_path = make_coarse_elev_path(output_degree)
elev_store = fsspec.get_mapper(elev_path)
return xr.open_zarr(elev_store)
def initialize_empty_dataset(lats, lons, times, output_path, var, chunks, attrs={}):
"""
Create an empty zarr store for output from inference
Parameters
----------
lats : coords
Coordinates for the new dataset
lons : coords
Coordinates for the new dataset
times : coords
Coordinates for the new dataset
output_path : UPath
Path to the zarr store
var : str
Name to give the variable in the empty dataset
chunks : dict
Chunking scheme for the empty dataset
attrs : dict
Attrs for the empty dataset
Returns
-------
None
The empty dataset is written as a zarr store to ``output_path``; nothing is returned
"""
ds = xr.DataArray(
np.empty(shape=(len(times), len(lats), len(lons)), dtype=np.float32),
dims=["time", "lat", "lon"],
coords=[times, lats, lons],
attrs=attrs,
)
ds = ds.to_dataset(name=var).chunk(chunks)
print(output_path)
ds.to_zarr(output_path, mode="w", compute=False)
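
# --- Illustrative usage sketch (not part of the original module) ---------------
# A minimal example, using a small synthetic global field, of the
# regrid-then-normalize pattern the helpers above provide.
def _example_regrid_and_normalize():  # pragma: no cover - illustration only
    coarse = xe.util.grid_global(2.0, 2.0, cf=True)
    coarse['tas'] = xr.DataArray(
        np.random.rand(len(coarse.lat), len(coarse.lon)), dims=['lat', 'lon'])
    fine = bilinear_interpolate(coarse, output_degree=1.0)
    return normalize(fine)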
|
PypiClean
|
/ZMS-5.1.0-py3-none-any.whl/Products/zms/plugins/www/ZMSRepositoryManager/manage_main_diff.js
|
$(function() {
if ( $('.prettyTextDiff').length > 0 ) {
$('#repo-changesets').prepend('<h4>Changesets</h4>');
}
$(".prettyTextDiff").each(function() {
var diffContainer = ".diff";
$(this).prettyTextDiff({
cleanup:true,
originalContainer:".original",
changedContainer:".changed",
diffContainer:diffContainer
});
var $diffContainer = $(diffContainer,this);
var lines = $diffContainer.html().replace(/<span>/gi,'').replace(/<\/span>/gi,'').split("<br>");
var show = [];
var changed = false;
for (var i = 0; i < lines.length; i++) {
var line = lines[i];
changed |= line.indexOf("<"+"del>")>=0 || line.indexOf("<ins>")>=0;
if (changed) {
show.push(i);
}
changed &= !(line.indexOf("<"+"/del>")>=0 || line.indexOf("</ins>")>=0);
}
var html = [];
changed = false;
for (var i = 0; i < lines.length; i++) {
var line = lines[i];
changed |= line.indexOf("<"+"del>")>=0 || line.indexOf("<"+"ins>")>=0;
line = '<'+'span class="line-number'+(changed?' line-changed':'')+'">'+(i+1)+'</span> '+lines[i];
if (!(show.contains(i-1) || show.contains(i) || show.contains(i+1))) {
line = '<'+'span class="diff-unchanged d-none">'+line+'<'+'/span>';
}
else {
line = line+'<'+'br/>';
}
html.push(line);
changed &= !(line.indexOf("<"+"/del>")>=0 || line.indexOf("<"+"/ins>")>=0);
}
$diffContainer.html(html.join(""));
$("strong",$(this).closest("tr").prev("tr")).addClass("zmi-helper-clickable").click(function() {
if ($(".diff-unchanged.d-none",$diffContainer).length > 0) {
$(".diff-unchanged",$diffContainer).removeClass("d-none").after("<br>");
}
else if ($(".diff-unchanged",$diffContainer).length > 0) {
$(".diff-unchanged",$diffContainer).addClass("d-none").next("br").remove();
}
});
});
});
function focus_anchorid(anchorid) {
$('.table.focus').removeClass('focus');
$(document).scrollTop( $(anchorid).offset().top );
$(anchorid).addClass('focus');
}
|
PypiClean
|
/msgraph-sdk-1.0.0a3.tar.gz/msgraph-sdk-1.0.0a3/msgraph/generated/users/item/contacts/contacts_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, Union
from ....models import contact, contact_collection_response
from ....models.o_data_errors import o_data_error
from .count import count_request_builder
from .delta import delta_request_builder
class ContactsRequestBuilder():
"""
Provides operations to manage the contacts property of the microsoft.graph.user entity.
"""
def count(self) -> count_request_builder.CountRequestBuilder:
"""
Provides operations to count the resources in the collection.
"""
return count_request_builder.CountRequestBuilder(self.request_adapter, self.path_parameters)
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new ContactsRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/users/{user%2Did}/contacts{?%24top,%24skip,%24search,%24filter,%24count,%24orderby,%24select}"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
def create_get_request_information(self,request_configuration: Optional[ContactsRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
"""
Get a contact collection from the default contacts folder of the signed-in user. There are two scenarios where an app can get contacts in another user's contact folder:
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.GET
request_info.headers["Accept"] = "application/json"
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
request_info.add_request_options(request_configuration.options)
return request_info
def create_post_request_information(self,body: Optional[contact.Contact] = None, request_configuration: Optional[ContactsRequestBuilderPostRequestConfiguration] = None) -> RequestInformation:
"""
Add a contact to the root Contacts folder or to the contacts endpoint of another contact folder.
Args:
body:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.POST
request_info.headers["Accept"] = "application/json"
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.add_request_options(request_configuration.options)
request_info.set_content_from_parsable(self.request_adapter, "application/json", body)
return request_info
def delta(self,) -> delta_request_builder.DeltaRequestBuilder:
"""
Provides operations to call the delta method.
Returns: delta_request_builder.DeltaRequestBuilder
"""
return delta_request_builder.DeltaRequestBuilder(self.request_adapter, self.path_parameters)
async def get(self,request_configuration: Optional[ContactsRequestBuilderGetRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> Optional[contact_collection_response.ContactCollectionResponse]:
"""
Get a contact collection from the default contacts folder of the signed-in user. There are two scenarios where an app can get contacts in another user's contact folder:
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
responseHandler: Response handler to use in place of the default response handling provided by the core service
Returns: Optional[contact_collection_response.ContactCollectionResponse]
"""
request_info = self.create_get_request_information(
request_configuration
)
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_async(request_info, contact_collection_response.ContactCollectionResponse, response_handler, error_mapping)
async def post(self,body: Optional[contact.Contact] = None, request_configuration: Optional[ContactsRequestBuilderPostRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> Optional[contact.Contact]:
"""
Add a contact to the root Contacts folder or to the contacts endpoint of another contact folder.
Args:
body:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
responseHandler: Response handler to use in place of the default response handling provided by the core service
Returns: Optional[contact.Contact]
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = self.create_post_request_information(
body, request_configuration
)
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_async(request_info, contact.Contact, response_handler, error_mapping)
@dataclass
class ContactsRequestBuilderGetQueryParameters():
"""
Get a contact collection from the default contacts folder of the signed-in user. There are two scenarios where an app can get contacts in another user's contact folder:
"""
# Include count of items
count: Optional[bool] = None
# Filter items by property values
filter: Optional[str] = None
# Order items by property values
orderby: Optional[List[str]] = None
# Search items by search phrases
search: Optional[str] = None
# Select properties to be returned
select: Optional[List[str]] = None
# Skip the first n items
skip: Optional[int] = None
# Show only the first n items
top: Optional[int] = None
def get_query_parameter(self,original_name: Optional[str] = None) -> str:
"""
Maps the query parameters names to their encoded names for the URI template parsing.
Args:
originalName: The original query parameter name in the class.
Returns: str
"""
if original_name is None:
raise Exception("original_name cannot be undefined")
if original_name == "count":
return "%24count"
if original_name == "filter":
return "%24filter"
if original_name == "orderby":
return "%24orderby"
if original_name == "search":
return "%24search"
if original_name == "select":
return "%24select"
if original_name == "skip":
return "%24skip"
if original_name == "top":
return "%24top"
return original_name
@dataclass
class ContactsRequestBuilderGetRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, str]] = None
# Request options
options: Optional[List[RequestOption]] = None
# Request query parameters
query_parameters: Optional[ContactsRequestBuilder.ContactsRequestBuilderGetQueryParameters] = None
@dataclass
class ContactsRequestBuilderPostRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, str]] = None
# Request options
options: Optional[List[RequestOption]] = None
|
PypiClean
|
/yowsup-celery-0.2.1.tar.gz/yowsup-celery-0.2.1/README.rst
|
===============================
yowsup celery
===============================
CI:
.. image:: https://img.shields.io/travis/jlmadurga/yowsup-celery.svg
:target: https://travis-ci.org/jlmadurga/yowsup-celery
.. image:: http://codecov.io/github/jlmadurga/yowsup-celery/coverage.svg?branch=master
:alt: Coverage
:target: http://codecov.io/github/jlmadurga/yowsup-celery?branch=master
.. image:: https://requires.io/github/jlmadurga/yowsup-celery/requirements.svg?branch=master
:target: https://requires.io/github/jlmadurga/yowsup-celery/requirements/?branch=master
:alt: Requirements Status
PyPI:
.. image:: https://img.shields.io/pypi/v/yowsup-celery.svg
:target: https://pypi.python.org/pypi/yowsup-celery
Docs:
.. image:: https://readthedocs.org/projects/yowsup-celery/badge/?version=latest
:target: https://readthedocs.org/projects/yowsup-celery/?badge=latest
:alt: Documentation Status
Yowsup integrated in a celery architecture
* Free software: ISC license
* Documentation: https://yowsup-celery.readthedocs.org.
Features
--------
* Celery app adapted to Yowsup
* Bootstep added to the worker to initialize Yowsup and stop it on TERM (sometimes ``kill -9`` is necessary)
* Options added to execute workers with different Whatsapp accounts
* Only works with gevent and threads, as the yowsup socket is shared between tasks
* Yowsup features included:
* Connect/Disconnect
* Send Text, Image and Audio Messages
* Receive Messages, Acks and Receipts
|
PypiClean
|
/fhir_types-0.2.4-py3-none-any.whl/fhir_types/FHIR_StructureMap_Group.py
|
from typing import Any, List, Literal, TypedDict
from .FHIR_Element import FHIR_Element
from .FHIR_id import FHIR_id
from .FHIR_string import FHIR_string
from .FHIR_StructureMap_Input import FHIR_StructureMap_Input
from .FHIR_StructureMap_Rule import FHIR_StructureMap_Rule
# A Map of relationships between 2 structures that can be used to transform data.
FHIR_StructureMap_Group = TypedDict(
"FHIR_StructureMap_Group",
{
# Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
"id": FHIR_string,
# May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# A unique name for the group for the convenience of human readers.
"name": FHIR_id,
# Extensions for name
"_name": FHIR_Element,
# Another group that this group adds rules to.
"extends": FHIR_id,
# Extensions for extends
"_extends": FHIR_Element,
# If this is the default rule set to apply for the source type or this combination of types.
"typeMode": Literal["none", "types", "type-and-types"],
# Extensions for typeMode
"_typeMode": FHIR_Element,
# Additional supporting documentation that explains the purpose of the group and the types of mappings within it.
"documentation": FHIR_string,
# Extensions for documentation
"_documentation": FHIR_Element,
# A name assigned to an instance of data. The instance must be provided when the mapping is invoked.
"input": List[FHIR_StructureMap_Input],
# Transform Rule from source to target.
"rule": List[FHIR_StructureMap_Rule],
},
total=False,
)
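
# --- Illustrative usage sketch (not part of the original module) ---------------
# A minimal example, with made-up field values, of describing a structure-map
# group with this TypedDict; only a subset of keys is populated, which is valid
# because the TypedDict is declared with ``total=False``.
_example_group: FHIR_StructureMap_Group = {
    "name": "PatientToPerson",
    "typeMode": "none",
    "documentation": "Maps a Patient resource onto a Person resource.",
    "input": [],
    "rule": [],
}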
|
PypiClean
|
/dagster_custom-0.12.14.tar.gz/dagster_custom-0.12.14/dagster/core/execution/plan/execute_step.py
|
from collections import defaultdict
from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, Union, cast
from dagster import check
from dagster.core.definitions import (
AssetKey,
AssetMaterialization,
ExpectationResult,
Materialization,
Output,
OutputDefinition,
SolidDefinition,
TypeCheck,
)
from dagster.core.definitions.decorators.solid import DecoratedSolidFunction
from dagster.core.definitions.event_metadata import EventMetadataEntry, PartitionMetadataEntry
from dagster.core.definitions.events import AssetLineageInfo, DynamicOutput
from dagster.core.errors import (
DagsterExecutionHandleOutputError,
DagsterInvariantViolationError,
DagsterStepOutputNotFoundError,
DagsterTypeCheckDidNotPass,
DagsterTypeCheckError,
DagsterTypeMaterializationError,
user_code_error_boundary,
)
from dagster.core.events import DagsterEvent
from dagster.core.execution.context.output import OutputContext
from dagster.core.execution.context.system import StepExecutionContext, TypeCheckContext
from dagster.core.execution.plan.compute import execute_core_compute
from dagster.core.execution.plan.inputs import StepInputData
from dagster.core.execution.plan.objects import StepSuccessData, TypeCheckData
from dagster.core.execution.plan.outputs import StepOutputData, StepOutputHandle
from dagster.core.execution.resolve_versions import resolve_step_output_versions
from dagster.core.storage.intermediate_storage import IntermediateStorageAdapter
from dagster.core.storage.io_manager import IOManager
from dagster.core.storage.tags import MEMOIZED_RUN_TAG
from dagster.core.types.dagster_type import DagsterType, DagsterTypeKind
from dagster.utils import ensure_gen, iterate_with_context
from dagster.utils.backcompat import experimental_functionality_warning
from dagster.utils.timing import time_execution_scope
from .compute import SolidOutputUnion
from .compute_generator import create_solid_compute_wrapper
from .utils import solid_execution_error_boundary
def _step_output_error_checked_user_event_sequence(
step_context: StepExecutionContext, user_event_sequence: Iterator[SolidOutputUnion]
) -> Iterator[SolidOutputUnion]:
"""
Process the event sequence to check for invariant violations in the event
sequence related to Output events emitted from the compute_fn.
This consumes and emits an event sequence.
"""
check.inst_param(step_context, "step_context", StepExecutionContext)
check.generator_param(user_event_sequence, "user_event_sequence")
step = step_context.step
op_label = step_context.describe_op()
output_names = list([output_def.name for output_def in step.step_outputs])
seen_outputs: Set[str] = set()
seen_mapping_keys: Dict[str, Set[str]] = defaultdict(set)
for user_event in user_event_sequence:
if not isinstance(user_event, (Output, DynamicOutput)):
yield user_event
continue
# do additional processing on Outputs
output = user_event
if not step.has_step_output(cast(str, output.output_name)):
raise DagsterInvariantViolationError(
f'Core compute for {op_label} returned an output "{output.output_name}" that does '
f"not exist. The available outputs are {output_names}"
)
step_output = step.step_output_named(cast(str, output.output_name))
output_def = step_context.pipeline_def.get_solid(step_output.solid_handle).output_def_named(
step_output.name
)
if isinstance(output, Output):
if output.output_name in seen_outputs:
raise DagsterInvariantViolationError(
f'Compute for {op_label} returned an output "{output.output_name}" multiple '
"times"
)
if output_def.is_dynamic:
raise DagsterInvariantViolationError(
f'Compute for {op_label} for output "{output.output_name}" defined as dynamic '
"must yield DynamicOutput, got Output."
)
else:
if not output_def.is_dynamic:
raise DagsterInvariantViolationError(
f"Compute for {op_label} yielded a DynamicOutput, but did not use "
"DynamicOutputDefinition."
)
if output.mapping_key in seen_mapping_keys[output.output_name]:
raise DagsterInvariantViolationError(
f"Compute for {op_label} yielded a DynamicOutput with mapping_key "
f'"{output.mapping_key}" multiple times.'
)
seen_mapping_keys[output.output_name].add(output.mapping_key)
yield output
seen_outputs.add(output.output_name)
for step_output in step.step_outputs:
step_output_def = step_context.solid_def.output_def_named(step_output.name)
if not step_output_def.name in seen_outputs and not step_output_def.optional:
if step_output_def.dagster_type.kind == DagsterTypeKind.NOTHING:
step_context.log.info(
f'Emitting implicit Nothing for output "{step_output_def.name}" on {op_label}'
)
yield Output(output_name=step_output_def.name, value=None)
elif not step_output_def.is_dynamic:
raise DagsterStepOutputNotFoundError(
(
f"Core compute for {op_label} did not return an output for non-optional "
f'output "{step_output_def.name}"'
),
step_key=step.key,
output_name=step_output_def.name,
)
def do_type_check(context: TypeCheckContext, dagster_type: DagsterType, value: Any) -> TypeCheck:
type_check = dagster_type.type_check(context, value)
if not isinstance(type_check, TypeCheck):
return TypeCheck(
success=False,
description=(
"Type checks must return TypeCheck. Type check for type {type_name} returned "
"value of type {return_type} when checking runtime value of type {dagster_type}."
).format(
type_name=dagster_type.display_name,
return_type=type(type_check),
dagster_type=type(value),
),
)
return type_check
def _create_step_input_event(
step_context: StepExecutionContext, input_name: str, type_check: TypeCheck, success: bool
) -> DagsterEvent:
return DagsterEvent.step_input_event(
step_context,
StepInputData(
input_name=input_name,
type_check_data=TypeCheckData(
success=success,
label=input_name,
description=type_check.description if type_check else None,
metadata_entries=type_check.metadata_entries if type_check else [],
),
),
)
def _type_checked_event_sequence_for_input(
step_context: StepExecutionContext, input_name: str, input_value: Any
) -> Iterator[DagsterEvent]:
check.inst_param(step_context, "step_context", StepExecutionContext)
check.str_param(input_name, "input_name")
step_input = step_context.step.step_input_named(input_name)
input_def = step_input.source.get_input_def(step_context.pipeline_def)
dagster_type = input_def.dagster_type
type_check_context = step_context.for_type(dagster_type)
input_type = type(input_value)
op_label = step_context.describe_op()
with user_code_error_boundary(
DagsterTypeCheckError,
lambda: (
f'Error occurred while type-checking input "{input_name}" of {op_label}, with Python '
f"type {input_type} and Dagster type {dagster_type.display_name}"
),
log_manager=type_check_context.log,
):
type_check = do_type_check(type_check_context, dagster_type, input_value)
yield _create_step_input_event(
step_context, input_name, type_check=type_check, success=type_check.success
)
if not type_check.success:
raise DagsterTypeCheckDidNotPass(
description=(
f'Type check failed for step input "{input_name}" - '
f'expected type "{dagster_type.display_name}". '
f"Description: {type_check.description}"
),
metadata_entries=type_check.metadata_entries,
dagster_type=dagster_type,
)
def _type_check_output(
step_context: StepExecutionContext,
step_output_handle: StepOutputHandle,
output: Any,
version: Optional[str],
) -> Iterator[DagsterEvent]:
check.inst_param(step_context, "step_context", StepExecutionContext)
check.inst_param(output, "output", (Output, DynamicOutput))
step_output = step_context.step.step_output_named(output.output_name)
step_output_def = step_context.solid_def.output_def_named(step_output.name)
dagster_type = step_output_def.dagster_type
type_check_context = step_context.for_type(dagster_type)
op_label = step_context.describe_op()
output_type = type(output.value)
with user_code_error_boundary(
DagsterTypeCheckError,
lambda: (
f'Error occurred while type-checking output "{output.output_name}" of {op_label}, with '
f"Python type {output_type} and Dagster type {dagster_type.display_name}"
),
log_manager=type_check_context.log,
):
type_check = do_type_check(type_check_context, dagster_type, output.value)
yield DagsterEvent.step_output_event(
step_context=step_context,
step_output_data=StepOutputData(
step_output_handle=step_output_handle,
type_check_data=TypeCheckData(
success=type_check.success,
label=step_output_handle.output_name,
description=type_check.description if type_check else None,
metadata_entries=type_check.metadata_entries if type_check else [],
),
version=version,
metadata_entries=[
entry for entry in output.metadata_entries if isinstance(entry, EventMetadataEntry)
],
),
)
if not type_check.success:
raise DagsterTypeCheckDidNotPass(
description='Type check failed for step output "{output_name}" - expected type "{dagster_type}".'.format(
output_name=output.output_name,
dagster_type=dagster_type.display_name,
),
metadata_entries=type_check.metadata_entries,
dagster_type=dagster_type,
)
def core_dagster_event_sequence_for_step(
step_context: StepExecutionContext,
) -> Iterator[DagsterEvent]:
"""
    Execute the step for the given step_context using the in-memory events.
    This function yields a sequence of DagsterEvents, but does not catch any
    exceptions that bubble up during the computation of the step.
"""
check.inst_param(step_context, "step_context", StepExecutionContext)
if step_context.previous_attempt_count > 0:
yield DagsterEvent.step_restarted_event(step_context, step_context.previous_attempt_count)
else:
yield DagsterEvent.step_start_event(step_context)
inputs = {}
input_lineage = []
for step_input in step_context.step.step_inputs:
input_def = step_input.source.get_input_def(step_context.pipeline_def)
dagster_type = input_def.dagster_type
if dagster_type.kind == DagsterTypeKind.NOTHING:
continue
input_lineage.extend(step_input.source.get_asset_lineage(step_context))
for event_or_input_value in ensure_gen(step_input.source.load_input_object(step_context)):
if isinstance(event_or_input_value, DagsterEvent):
yield event_or_input_value
else:
check.invariant(step_input.name not in inputs)
inputs[step_input.name] = event_or_input_value
for input_name, input_value in inputs.items():
for evt in check.generator(
_type_checked_event_sequence_for_input(step_context, input_name, input_value)
):
yield evt
input_lineage = _dedup_asset_lineage(input_lineage)
    # The core execution loop expects a compute generator in a specific format: a generator that
    # takes a context and a dictionary of inputs, and yields output events. If a solid definition
# was generated from the @solid or @lambda_solid decorator, then compute_fn needs to be coerced
# into this format. If the solid definition was created directly, then it is expected that the
# compute_fn is already in this format.
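    # Illustrative sketch of that expected shape (hypothetical compute function, not part of
    # this module):
    #
    #     def _compute_fn(context, inputs):
    #         yield Output(inputs["num"] + 1, output_name="result")
    #
    # Decorated solids are wrapped below so their compute functions conform to this shape.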
if isinstance(step_context.solid_def.compute_fn, DecoratedSolidFunction):
core_gen = create_solid_compute_wrapper(step_context.solid_def)
else:
core_gen = step_context.solid_def.compute_fn
with time_execution_scope() as timer_result:
user_event_sequence = check.generator(
execute_core_compute(
step_context,
inputs,
core_gen,
)
)
# It is important for this loop to be indented within the
# timer block above in order for time to be recorded accurately.
for user_event in check.generator(
_step_output_error_checked_user_event_sequence(step_context, user_event_sequence)
):
if isinstance(user_event, (Output, DynamicOutput)):
for evt in _type_check_and_store_output(step_context, user_event, input_lineage):
yield evt
# for now, I'm ignoring AssetMaterializations yielded manually, but we might want
# to do something with these in the above path eventually
elif isinstance(user_event, (AssetMaterialization, Materialization)):
yield DagsterEvent.asset_materialization(step_context, user_event, input_lineage)
elif isinstance(user_event, ExpectationResult):
yield DagsterEvent.step_expectation_result(step_context, user_event)
else:
check.failed(
"Unexpected event {event}, should have been caught earlier".format(
event=user_event
)
)
yield DagsterEvent.step_success_event(
step_context, StepSuccessData(duration_ms=timer_result.millis)
)
def _type_check_and_store_output(
step_context: StepExecutionContext,
output: Union[DynamicOutput, Output],
input_lineage: List[AssetLineageInfo],
) -> Iterator[DagsterEvent]:
check.inst_param(step_context, "step_context", StepExecutionContext)
check.inst_param(output, "output", (Output, DynamicOutput))
check.list_param(input_lineage, "input_lineage", AssetLineageInfo)
mapping_key = output.mapping_key if isinstance(output, DynamicOutput) else None
step_output_handle = StepOutputHandle(
step_key=step_context.step.key, output_name=output.output_name, mapping_key=mapping_key
)
# If we are executing using the execute_in_process API, then we allow for the outputs of solids
# to be directly captured to a dictionary after they are computed.
if step_context.output_capture is not None:
step_context.output_capture[step_output_handle] = output.value
        # capture output at the step level for threading the computed output values to the hook context
step_context.step_output_capture[step_output_handle] = output.value
version = (
resolve_step_output_versions(
step_context.pipeline_def, step_context.execution_plan, step_context.resolved_run_config
).get(step_output_handle)
if MEMOIZED_RUN_TAG in step_context.pipeline.get_definition().tags
else None
)
for output_event in _type_check_output(step_context, step_output_handle, output, version):
yield output_event
for evt in _store_output(step_context, step_output_handle, output, input_lineage):
yield evt
for evt in _create_type_materializations(step_context, output.output_name, output.value):
yield evt
def _asset_key_and_partitions_for_output(
output_context: OutputContext,
output_def: OutputDefinition,
output_manager: IOManager,
) -> Tuple[Optional[AssetKey], Set[str]]:
manager_asset_key = output_manager.get_output_asset_key(output_context)
if output_def.is_asset:
if manager_asset_key is not None:
solid_def = cast(SolidDefinition, output_context.solid_def)
raise DagsterInvariantViolationError(
f'Both the OutputDefinition and the IOManager of output "{output_def.name}" on '
f'solid "{solid_def.name}" associate it with an asset. Either remove '
"the asset_key parameter on the OutputDefinition or use an IOManager that does not "
"specify an AssetKey in its get_output_asset_key() function."
)
return (
output_def.get_asset_key(output_context),
output_def.get_asset_partitions(output_context) or set(),
)
elif manager_asset_key:
return manager_asset_key, output_manager.get_output_asset_partitions(output_context)
return None, set()
def _dedup_asset_lineage(asset_lineage: List[AssetLineageInfo]) -> List[AssetLineageInfo]:
"""Method to remove duplicate specifications of the same Asset/Partition pair from the lineage
information. Duplicates can occur naturally when calculating transitive dependencies from solids
with multiple Outputs, which in turn have multiple Inputs (because each Output of the solid will
inherit all dependencies from all of the solid Inputs).
"""
key_partition_mapping: Dict[AssetKey, Set[str]] = defaultdict(set)
for lineage_info in asset_lineage:
if not lineage_info.partitions:
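            # the union with an empty set is a no-op on the values; it simply forces the
            # defaultdict to create an entry for an asset key that has no partitions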
key_partition_mapping[lineage_info.asset_key] |= set()
for partition in lineage_info.partitions:
key_partition_mapping[lineage_info.asset_key].add(partition)
return [
AssetLineageInfo(asset_key=asset_key, partitions=partitions)
for asset_key, partitions in key_partition_mapping.items()
]
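# Illustrative sketch of the dedup behaviour above (hypothetical asset key and partitions, not
# part of this module):
#
#     _dedup_asset_lineage([
#         AssetLineageInfo(asset_key=AssetKey("raw"), partitions={"2021-01-01"}),
#         AssetLineageInfo(asset_key=AssetKey("raw"), partitions={"2021-01-02"}),
#     ])
#     # -> [AssetLineageInfo(asset_key=AssetKey("raw"), partitions={"2021-01-01", "2021-01-02"})]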
def _get_output_asset_materializations(
asset_key: AssetKey,
asset_partitions: Set[str],
output: Union[Output, DynamicOutput],
output_def: OutputDefinition,
io_manager_metadata_entries: List[Union[EventMetadataEntry, PartitionMetadataEntry]],
) -> Iterator[AssetMaterialization]:
all_metadata = output.metadata_entries + io_manager_metadata_entries
if asset_partitions:
metadata_mapping: Dict[str, List["EventMetadataEntry"]] = {
partition: [] for partition in asset_partitions
}
for entry in all_metadata:
# if you target a given entry at a partition, only apply it to the requested partition
# otherwise, apply it to all partitions
if isinstance(entry, PartitionMetadataEntry):
if entry.partition not in asset_partitions:
raise DagsterInvariantViolationError(
f"Output {output_def.name} associated a metadata entry ({entry}) with the partition "
f"`{entry.partition}`, which is not one of the declared partition mappings ({asset_partitions})."
)
metadata_mapping[entry.partition].append(entry.entry)
else:
for partition in metadata_mapping.keys():
metadata_mapping[partition].append(entry)
for partition in asset_partitions:
yield AssetMaterialization(
asset_key=asset_key,
partition=partition,
metadata_entries=metadata_mapping[partition],
)
else:
for entry in all_metadata:
if isinstance(entry, PartitionMetadataEntry):
raise DagsterInvariantViolationError(
f"Output {output_def.name} got a PartitionMetadataEntry ({entry}), but "
"is not associated with any specific partitions."
)
yield AssetMaterialization(
asset_key=asset_key, metadata_entries=cast(List["EventMetadataEntry"], all_metadata)
)
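# Illustrative sketch of the partition handling above (hypothetical partition names, not part of
# this module): with asset_partitions == {"a", "b"}, a plain EventMetadataEntry is attached to the
# materializations for both "a" and "b", while a PartitionMetadataEntry targeting "a" is attached
# only to the materialization for partition "a".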
def _store_output(
step_context: StepExecutionContext,
step_output_handle: StepOutputHandle,
output: Union[Output, DynamicOutput],
input_lineage: List[AssetLineageInfo],
) -> Iterator[DagsterEvent]:
output_def = step_context.solid_def.output_def_named(step_output_handle.output_name)
output_manager = step_context.get_io_manager(step_output_handle)
output_context = step_context.get_output_context(step_output_handle)
handle_output_res = output_manager.handle_output(output_context, output.value)
manager_materializations = []
manager_metadata_entries = []
if handle_output_res is not None:
for elt in iterate_with_context(
lambda: solid_execution_error_boundary(
DagsterExecutionHandleOutputError,
msg_fn=lambda: (
f'Error occurred while handling output "{output_context.name}" of '
f'step "{step_context.step.key}":'
),
step_context=step_context,
step_key=step_context.step.key,
output_name=output_context.name,
),
ensure_gen(handle_output_res),
):
if isinstance(elt, AssetMaterialization):
manager_materializations.append(elt)
elif isinstance(elt, (EventMetadataEntry, PartitionMetadataEntry)):
experimental_functionality_warning(
"Yielding metadata from an IOManager's handle_output() function"
)
manager_metadata_entries.append(elt)
else:
raise DagsterInvariantViolationError(
f"IO manager on output {output_def.name} has returned "
f"value {elt} of type {type(elt).__name__}. The return type can only be "
"one of AssetMaterialization, EventMetadataEntry, PartitionMetadataEntry."
)
# do not alter explicitly created AssetMaterializations
for materialization in manager_materializations:
yield DagsterEvent.asset_materialization(step_context, materialization, input_lineage)
asset_key, partitions = _asset_key_and_partitions_for_output(
output_context, output_def, output_manager
)
if asset_key:
for materialization in _get_output_asset_materializations(
asset_key,
partitions,
output,
output_def,
manager_metadata_entries,
):
yield DagsterEvent.asset_materialization(step_context, materialization, input_lineage)
yield DagsterEvent.handled_output(
step_context,
output_name=step_output_handle.output_name,
manager_key=output_def.io_manager_key,
        message_override=f'Handled output "{step_output_handle.output_name}" using intermediate storage'
if isinstance(output_manager, IntermediateStorageAdapter)
else None,
metadata_entries=[
entry for entry in manager_metadata_entries if isinstance(entry, EventMetadataEntry)
],
)
def _create_type_materializations(
step_context: StepExecutionContext, output_name: str, value: Any
) -> Iterator[DagsterEvent]:
"""If the output has any dagster type materializers, runs them."""
step = step_context.step
current_handle = step.solid_handle
# check for output mappings at every point up the composition hierarchy
while current_handle:
solid_config = step_context.resolved_run_config.solids.get(current_handle.to_string())
current_handle = current_handle.parent
if solid_config is None:
continue
for output_spec in solid_config.outputs.type_materializer_specs:
check.invariant(len(output_spec) == 1)
config_output_name, output_spec = list(output_spec.items())[0]
if config_output_name == output_name:
step_output = step.step_output_named(output_name)
with user_code_error_boundary(
DagsterTypeMaterializationError,
msg_fn=lambda: (
"Error occurred during output materialization:"
f'\n output name: "{output_name}"'
f'\n solid invocation: "{step_context.solid.name}"'
f'\n solid definition: "{step_context.solid_def.name}"'
),
log_manager=step_context.log,
):
output_def = step_context.solid_def.output_def_named(step_output.name)
dagster_type = output_def.dagster_type
materializations = dagster_type.materializer.materialize_runtime_values(
step_context, output_spec, value
)
for materialization in materializations:
if not isinstance(materialization, (AssetMaterialization, Materialization)):
raise DagsterInvariantViolationError(
(
"materialize_runtime_values on type {type_name} has returned "
"value {value} of type {python_type}. You must return an "
"AssetMaterialization."
).format(
type_name=dagster_type.display_name,
value=repr(materialization),
python_type=type(materialization).__name__,
)
)
yield DagsterEvent.asset_materialization(step_context, materialization)
|
PypiClean
|
/clowder-repo-4.0b6.tar.gz/clowder-repo-4.0b6/clowder/cli/branch.py
|
import argparse
import clowder.util.formatting as fmt
from clowder.clowder_controller import CLOWDER_CONTROLLER
from clowder.config import Config
from clowder.model.util import filter_projects
from clowder.util.decorators import (
print_clowder_name,
print_clowder_repo_status,
valid_clowder_yaml_required
)
from .util import add_parser_arguments
def add_branch_parser(subparsers: argparse._SubParsersAction) -> None: # noqa
"""Add clowder branch parser
:param argparse._SubParsersAction subparsers: Subparsers action to add parser to
"""
arguments = [
(['projects'], dict(metavar='<project|group>', default='default', nargs='*',
choices=CLOWDER_CONTROLLER.project_choices_with_default,
help=fmt.project_options_help_message('projects and groups to show branches for')))
]
parser = subparsers.add_parser('branch', help='Display current branches')
parser.formatter_class = argparse.RawTextHelpFormatter
add_parser_arguments(parser, arguments)
mutually_exclusive_arguments = [
(['--all', '-a'], dict(action='store_true', help='show local and remote branches')),
(['--remote', '-r'], dict(action='store_true', help='show remote branches'))
]
mutually_exclusive_group = parser.add_mutually_exclusive_group()
add_parser_arguments(mutually_exclusive_group, mutually_exclusive_arguments)
parser.set_defaults(func=branch)
@valid_clowder_yaml_required
@print_clowder_name
@print_clowder_repo_status
def branch(args) -> None:
"""Clowder branch command private implementation"""
if args.remote:
local = False
remote = True
elif args.all:
local = True
remote = True
else:
local = True
remote = False
config = Config(CLOWDER_CONTROLLER.name, CLOWDER_CONTROLLER.project_choices)
projects = config.process_projects_arg(args.projects)
projects = filter_projects(CLOWDER_CONTROLLER.projects, projects)
for project in projects:
print(project.status())
project.branch(local=local, remote=remote)
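# Illustrative CLI usage (hypothetical group name, not part of this module):
#
#     clowder branch                 # show local branches for the default projects
#     clowder branch --all mygroup   # show local and remote branches for "mygroup"
#     clowder branch --remote        # show only remote branches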
|
PypiClean
|
/eulith_web3-0.14.4.tar.gz/eulith_web3-0.14.4/src/eulith_web3/asn_dump.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import open, bytes, str
import sys
import base64
import binascii
import asn1
def read_pem(input_file):
"""
    Reads PEM-formatted input such as private and public keys used for SSL/TLS encryption, and X.509/SSL certificates.
    The original file can be found here: https://github.com/andrivet/python-asn1/blob/master/examples/dump.py
"""
data = []
state = 0
for line in input_file:
if state == 0:
if line.startswith('-----BEGIN'):
state = 1
elif state == 1:
if line.startswith('-----END'):
state = 2
else:
data.append(line)
elif state == 2:
break
if state != 2:
raise ValueError('No PEM encoded input found')
data = ''.join(data)
return base64.b64decode(data)
# maps ASN.1 (Abstract Syntax Notation One) tag numbers to human-readable string names
tag_id_to_string_map = {
asn1.Numbers.Boolean: "BOOLEAN",
asn1.Numbers.Integer: "INTEGER",
asn1.Numbers.BitString: "BIT STRING",
asn1.Numbers.OctetString: "OCTET STRING",
asn1.Numbers.Null: "NULL",
asn1.Numbers.ObjectIdentifier: "OBJECT",
asn1.Numbers.PrintableString: "PRINTABLESTRING",
asn1.Numbers.IA5String: "IA5STRING",
asn1.Numbers.UTCTime: "UTCTIME",
asn1.Numbers.GeneralizedTime: "GENERALIZED TIME",
asn1.Numbers.Enumerated: "ENUMERATED",
asn1.Numbers.Sequence: "SEQUENCE",
asn1.Numbers.Set: "SET"
}
# maps ASN.1 class IDs to string names
class_id_to_string_map = {
asn1.Classes.Universal: "U",
asn1.Classes.Application: "A",
asn1.Classes.Context: "C",
asn1.Classes.Private: "P"
}
# maps object IDs to string names
object_id_to_string_map = {
"1.2.840.113549.1.1.1": "rsaEncryption",
"1.2.840.113549.1.1.5": "sha1WithRSAEncryption",
"1.3.6.1.5.5.7.1.1": "authorityInfoAccess",
"2.5.4.3": "commonName",
"2.5.4.4": "surname",
"2.5.4.5": "serialNumber",
"2.5.4.6": "countryName",
"2.5.4.7": "localityName",
"2.5.4.8": "stateOrProvinceName",
"2.5.4.9": "streetAddress",
"2.5.4.10": "organizationName",
"2.5.4.11": "organizationalUnitName",
"2.5.4.12": "title",
"2.5.4.13": "description",
"2.5.4.42": "givenName",
"1.2.840.113549.1.9.1": "emailAddress",
"2.5.29.14": "X509v3 Subject Key Identifier",
"2.5.29.15": "X509v3 Key Usage",
"2.5.29.16": "X509v3 Private Key Usage Period",
"2.5.29.17": "X509v3 Subject Alternative Name",
"2.5.29.18": "X509v3 Issuer Alternative Name",
"2.5.29.19": "X509v3 Basic Constraints",
"2.5.29.30": "X509v3 Name Constraints",
"2.5.29.31": "X509v3 CRL Distribution Points",
"2.5.29.32": "X509v3 Certificate Policies Extension",
"2.5.29.33": "X509v3 Policy Mappings",
"2.5.29.35": "X509v3 Authority Key Identifier",
"2.5.29.36": "X509v3 Policy Constraints",
"2.5.29.37": "X509v3 Extended Key Usage"
}
def tag_id_to_string(identifier):
if identifier in tag_id_to_string_map:
return tag_id_to_string_map[identifier]
return '{:#02x}'.format(identifier)
def class_id_to_string(identifier):
if identifier in class_id_to_string_map:
return class_id_to_string_map[identifier]
raise ValueError('Illegal class: {:#02x}'.format(identifier))
def object_identifier_to_string(identifier):
if identifier in object_id_to_string_map:
return object_id_to_string_map[identifier]
return identifier
def value_to_string(tag_number, value):
if tag_number == asn1.Numbers.ObjectIdentifier:
return object_identifier_to_string(value)
elif isinstance(value, bytes):
        # hexlify() returns bytes; decode before prefixing so the output doesn't contain a stray "b'...'"
        return '0x' + binascii.hexlify(value).upper().decode('ascii')
elif isinstance(value, str):
return value
else:
return repr(value)
def pretty_print(input_stream, output_stream=sys.stdout, indent=0):
while not input_stream.eof():
tag = input_stream.peek()
if tag.typ == asn1.Types.Primitive:
tag, value = input_stream.read()
output_stream.write(' ' * indent)
output_stream.write('[{}] {}: {}\n'.format(class_id_to_string(tag.cls), tag_id_to_string(tag.nr),
value_to_string(tag.nr, value)))
elif tag.typ == asn1.Types.Constructed:
output_stream.write(' ' * indent)
output_stream.write('[{}] {}\n'.format(class_id_to_string(tag.cls), tag_id_to_string(tag.nr)))
input_stream.enter()
pretty_print(input_stream, output_stream, indent + 2)
input_stream.leave()
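# Minimal usage sketch, assuming a PEM file path is passed as the first CLI argument
# (this driver is an illustrative addition, not part of the original module):
if __name__ == '__main__':
    with open(sys.argv[1], 'r') as pem_file:
        der_data = read_pem(pem_file)
    decoder = asn1.Decoder()
    decoder.start(der_data)
    pretty_print(decoder)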
|
PypiClean
|
/hurm.fe-0.10.tar.gz/hurm.fe-0.10/hurm/fe/static/app/module/Activities.js
|
/*jsl:declare Ext*/
/*jsl:declare _*/
/*jsl:declare MP*/
/*jsl:declare HuRM*/
Ext.define('HuRM.module.Activities.Actions', {
extend: 'MP.action.StoreAware',
uses: [
'Ext.Action',
'Ext.form.field.TextArea',
'MP.form.Panel',
'MP.window.Notification'
],
statics: {
EDIT_ACTION: 'edit_activity',
PAYLOADS_ACTION: 'activity_payloads'
},
initActions: function() {
var me = this;
var ids = me.statics();
me.callParent();
me.editAction = me.addAction(new Ext.Action({
itemId: ids.EDIT_ACTION,
text: _('Modify'),
tooltip: _('Edit selected activity.'),
iconCls: 'edit-record-icon',
disabled: true,
needsOneSelectedRow: true,
handler: function() {
var record = me.component.getSelectionModel().getSelection()[0];
me.showEditWindow(record);
}
}));
me.payloadsAction = me.addAction(new Ext.Action({
itemId: ids.PAYLOADS_ACTION,
text: _('Payloads'),
tooltip: _('Modify associated payloads.'),
iconCls: 'edit-record-icon',
disabled: true,
needsOneSelectedRow: true,
needsCleanStore: true,
handler: function() {
var record = me.component.getSelectionModel().getSelection()[0],
module = me.module.app.getModule('activity-payloads-win');
module.createOrShowWindow(record);
}
}));
},
attachActions: function() {
var me = this;
me.callParent();
var tbar = me.component.child('#ttoolbar');
tbar.add(2, ' ', me.editAction, me.payloadsAction);
me.component.on({
itemdblclick: function() {
if(!me.editAction.isDisabled())
me.editAction.execute();
}
});
me.component.store.on({
add: function(store, records) {
//jsl:unused store
var record = records[0];
me.showEditWindow(record);
}
});
},
showEditWindow: function(record) {
var me = this;
var desktop = me.module.app.getDesktop();
var win = desktop.getWindow('edit-activity-win');
// If the window is already present, destroy and recreate it,
// to reapply configuration and filters
if(win) {
win.destroy();
}
var metadata = me.module.config.metadata,
size = desktop.getReasonableWindowSize(500, 300),
editors = metadata.editors({
'*': {
editor: MP.form.Panel.getDefaultEditorSettingsFunction('100%')
},
'note': {
editor: {
xtype: 'textarea'
}
}
}),
form = Ext.create('MP.form.Panel', {
autoScroll: true,
fieldDefaults: {
labelWidth: 100,
margin: '15 10 0 10'
},
items: [editors.description,
editors.allowoverlappedtasks,
editors.allowoverlappedduties,
editors.note],
buttons: [{
text: _('Cancel'),
handler: function() {
if(record.phantom) {
record.store.deleteRecord(record);
}
win.close();
}
}, {
text: _('Confirm'),
formBind: true,
handler: function() {
if(form.isValid()) {
form.updateRecord(record);
win.close();
Ext.create("MP.window.Notification", {
position: 't',
width: 260,
title: _('Changes have been applied…'),
html: _('Your changes have been applied <strong>locally</strong>.<br/><br/>To make them permanent you must click on the <blink>Save</blink> button.'),
iconCls: 'info-icon'
}).show();
}
}
}]
});
win = desktop.createWindow({
id: 'edit-activity-win',
title: _('Edit activity'),
iconCls: 'edit-activity-icon',
width: size.width,
height: size.height,
modal: true,
items: form,
closable: false,
minimizable: false,
maximizable: false,
resizable: false
});
form.loadRecord(record);
win.show();
}
});
Ext.define('HuRM.module.Activities', {
extend: 'MP.desktop.Module',
requires: [
'MP.grid.Panel'
],
uses: [
'HuRM.module.Activities.Actions'
],
id: 'activities-win',
iconCls: 'activities-icon',
launcherText: _('Activities'),
launcherTooltip: _('<b>Activities</b><br/>Activities management'),
config: {
xtype: 'editable-grid',
pageSize: 23,
autoShowAllEditors: false,
clicksToEdit: 0,
dataURL: '/data/activities',
sorters: ['description'],
stripeRows: true
},
getConfig: function(callback) {
var me = this,
config = me.config;
if(!config.metadata) {
MP.data.MetaData.fetch(config.dataURL, me, function(metadata) {
var overrides = {},
fields = metadata.fields(overrides);
Ext.apply(config, {
metadata: metadata,
fields: fields,
columns: metadata.columns(overrides, false),
idProperty: metadata.primary_key,
totalProperty: metadata.count_slot,
successProperty: metadata.success_slot,
rootProperty: metadata.root_slot,
plugins: [
Ext.create('HuRM.module.Activities.Actions', { module: me }),
]
});
callback(config);
me.app.on('logout', function() { delete config.metadata; }, me, { single: true });
});
} else {
callback(config);
}
},
createOrShowWindow: function() {
var me = this,
desktop = me.app.getDesktop(),
win = desktop.getWindow(me.id);
// If the window is already present, destroy and recreate it,
// to reapply configuration and filters
if(win) {
win.destroy();
}
me.configure(
[me.getConfig],
function(done) {
var size = desktop.getReasonableWindowSize(570, 250);
win = desktop.createWindow({
id: me.id,
title: me.getLauncherText(),
width: size.width,
height: size.height,
iconCls: me.iconCls,
items: [me.config]
});
var grid = win.child('editable-grid');
// Fetch the first page of records, and when done show
// the window
grid.store.load({
params: {start: 0, limit: me.pageSize},
callback: function() {
win.on({show: done, single: true});
win.show();
}
});
var da = grid.findActionById('delete');
da.shouldBeDisabled = me.shouldDisableDeleteAction.bind(grid);
});
},
shouldDisableDeleteAction: function() {
var grid = this;
var sm = grid.getSelectionModel();
if(sm.getCount() > 0) {
var selrecs = sm.getSelection();
var disable = false;
for(var i=selrecs.length-1; i>=0; i--) {
var record = selrecs[i];
if(record.get('Tasks') > 0) {
disable = true;
break;
}
}
return disable;
} else {
return true;
}
}
});
|
PypiClean
|
/TxGNN-0.0.3.tar.gz/TxGNN-0.0.3/txgnn/TxEval.py
|
import pandas as pd

from .utils import *
class TxEval:
def __init__(self, model):
        (self.df, self.df_train, self.df_valid, self.df_test,
         self.data_folder, self.G, self.best_model,
         self.weight_bias_track, self.wandb) = (
            model.df, model.df_train, model.df_valid, model.df_test,
            model.data_folder, model.G, model.best_model,
            model.weight_bias_track, model.wandb)
self.device = model.device
self.disease_rel_types = ['rev_contraindication', 'rev_indication', 'rev_off-label use']
self.split = model.split
def eval_disease_centric(self, disease_idxs, relation = None, save_result = False, show_plot = False, verbose = False, save_name = None, return_raw = False, simulate_random = True):
if self.split == 'full_graph':
# set only_prediction to True during full graph training
only_prediction = True
else:
only_prediction = False
if disease_idxs == 'test_set':
disease_idxs = None
self.out = disease_centric_evaluation(self.df, self.df_train, self.df_valid, self.df_test, self.data_folder, self.G, self.best_model,self.device, disease_idxs, relation, self.weight_bias_track, self.wandb, show_plot, verbose, return_raw, simulate_random, only_prediction)
if save_result:
import pickle, os
if save_name is None:
save_name = os.path.join(self.data_folder, 'disease_centric_eval.pkl')
with open(save_name, 'wb') as f:
pickle.dump(self.out, f)
return self.out
def retrieve_disease_idxs_test_set(self, relation):
relation = 'rev_' + relation
df_train_valid = pd.concat([self.df_train, self.df_valid])
df_dd = self.df_test[self.df_test.relation.isin(self.disease_rel_types)]
df_dd_train = df_train_valid[df_train_valid.relation.isin(self.disease_rel_types)]
df_rel_dd = df_dd[df_dd.relation == relation]
return df_rel_dd.x_idx.unique()
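# Illustrative usage sketch (assumes a trained TxGNN model object; not part of the original module):
#
#     evaluator = TxEval(model)
#     disease_idxs = evaluator.retrieve_disease_idxs_test_set('indication')
#     results = evaluator.eval_disease_centric(disease_idxs, relation='rev_indication')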
|
PypiClean
|
/itk_rtk-2.4.1-cp37-cp37m-macosx_10_9_x86_64.whl/itk/Configuration/RTKConfig.py
|
depends = ('ITKPyBase', 'ITKSmoothing', 'ITKRegistrationCommon', 'ITKOptimizers', 'ITKImageNoise', 'ITKIOTIFF', 'ITKIORAW', 'ITKIOMeta', 'ITKIOGDCM', 'ITKFFT', 'ITKConvolution', 'ITKCommon', 'ITKBridgeNumPy', )
templates = ( ('FixedArray', 'itk::FixedArray', 'itkFixedArrayi2', False, 'int, 2'),
('Vector', 'itk::Vector', 'itkVectori2', False, 'int, 2'),
('CovariantVector', 'itk::CovariantVector', 'itkCovariantVectorD1', False, 'double, 1'),
('Point', 'itk::Point', 'itkPointD1', False, 'double, 1'),
('vector', 'std::vector', 'vectoritkPointD1', False, 'itk::Point< double, 1 >'),
('ContinuousIndex', 'itk::ContinuousIndex', 'itkContinuousIndexD1', False, 'double, 1'),
('Matrix', 'itk::Matrix', 'itkMatrixD11', False, 'double, 1, 1'),
('VariableLengthVector', 'itk::VariableLengthVector', 'itkVariableLengthVectorI', False, 'int'),
('ImageBase', 'itk::ImageBase', 'itkImageBase1', False, '1'),
('Image', 'itk::Image', 'itkImageUS1', False, 'unsigned short, 1'),
('vector', 'std::vector', 'vectoritkImageUS1', False, 'itk::Image< unsigned short, 1 > '),
('Image', 'itk::Image', 'itkImageUC1', False, 'unsigned char, 1'),
('vector', 'std::vector', 'vectoritkImageUC1', False, 'itk::Image< unsigned char, 1 > '),
('Image', 'itk::Image', 'itkImageF1', False, 'float, 1'),
('vector', 'std::vector', 'vectoritkImageF1', False, 'itk::Image< float, 1 > '),
('Image', 'itk::Image', 'itkImageD1', False, 'double, 1'),
('vector', 'std::vector', 'vectoritkImageD1', False, 'itk::Image< double, 1 > '),
('Image', 'itk::Image', 'itkImageVF52', False, 'itk::Vector< float,5 >, 2'),
('vector', 'std::vector', 'vectoritkImageVF52', False, 'itk::Image< itk::Vector< float,5 >, 2 > '),
('Image', 'itk::Image', 'itkImageCVF52', False, 'itk::CovariantVector< float,5 >, 2'),
('vector', 'std::vector', 'vectoritkImageCVF52', False, 'itk::Image< itk::CovariantVector< float,5 >, 2 > '),
('Image', 'itk::Image', 'itkImageVF53', False, 'itk::Vector< float,5 >, 3'),
('vector', 'std::vector', 'vectoritkImageVF53', False, 'itk::Image< itk::Vector< float,5 >, 3 > '),
('Image', 'itk::Image', 'itkImageCVF53', False, 'itk::CovariantVector< float,5 >, 3'),
('vector', 'std::vector', 'vectoritkImageCVF53', False, 'itk::Image< itk::CovariantVector< float,5 >, 3 > '),
('Image', 'itk::Image', 'itkImageVF54', False, 'itk::Vector< float,5 >, 4'),
('vector', 'std::vector', 'vectoritkImageVF54', False, 'itk::Image< itk::Vector< float,5 >, 4 > '),
('Image', 'itk::Image', 'itkImageCVF54', False, 'itk::CovariantVector< float,5 >, 4'),
('vector', 'std::vector', 'vectoritkImageCVF54', False, 'itk::Image< itk::CovariantVector< float,5 >, 4 > '),
('Image', 'itk::Image', 'itkImageCVD53', False, 'itk::CovariantVector<double,5>, 3'),
('vector', 'std::vector', 'vectoritkImageCVD53', False, 'itk::Image< itk::CovariantVector<double,5>, 3 > '),
('ImageSource', 'itk::ImageSource', 'itkImageSourceIF1', False, 'itk::Image<float, 1>'),
('ImageSource', 'itk::ImageSource', 'itkImageSourceID1', False, 'itk::Image<double, 1>'),
('ImageSource', 'itk::ImageSource', 'itkImageSourceIUC1', False, 'itk::Image<unsigned char, 1>'),
('ImageSource', 'itk::ImageSource', 'itkImageSourceIUS1', False, 'itk::Image<unsigned short, 1>'),
('ImageSource', 'itk::ImageSource', 'itkImageSourceIVF52', False, 'itk::Image<itk::Vector< float,5 >, 2>'),
('ImageSource', 'itk::ImageSource', 'itkImageSourceIVF53', False, 'itk::Image<itk::Vector< float,5 >, 3>'),
('ImageSource', 'itk::ImageSource', 'itkImageSourceIVF54', False, 'itk::Image<itk::Vector< float,5 >, 4>'),
('ImageSource', 'itk::ImageSource', 'itkImageSourceICVD23', False, 'itk::Image<itk::CovariantVector<double, 2>, 3>'),
('ImageSource', 'itk::ImageSource', 'itkImageSourceICVD43', False, 'itk::Image<itk::CovariantVector<double, 4>, 3>'),
('ImageSource', 'itk::ImageSource', 'itkImageSourceICVD53', False, 'itk::Image<itk::CovariantVector<double, 5>, 3>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterIF1IF1', False, 'itk::Image<float, 1>, itk::Image<float, 1>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterID1ID1', False, 'itk::Image<double, 1>, itk::Image<double, 1>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterIF3ID2', False, 'itk::Image<float, 3>, itk::Image<double, 2>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterIVF52IVF52', False, 'itk::Image<itk::Vector< float,5 >, 2>, itk::Image<itk::Vector< float,5 >, 2>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterIVF53IVF53', False, 'itk::Image<itk::Vector< float,5 >, 3>, itk::Image<itk::Vector< float,5 >, 3>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterIVF54IVF54', False, 'itk::Image<itk::Vector< float,5 >, 4>, itk::Image<itk::Vector< float,5 >, 4>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterIVF24IVF23', False, 'itk::Image<itk::Vector< float,2 >, 4>, itk::Image<itk::Vector< float,2 >, 3>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterIVF34IVF33', False, 'itk::Image<itk::Vector< float,3 >, 4>, itk::Image<itk::Vector< float,3 >, 3>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterIVF44IVF43', False, 'itk::Image<itk::Vector< float,4 >, 4>, itk::Image<itk::Vector< float,4 >, 3>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterIVF54IVF53', False, 'itk::Image<itk::Vector< float,5 >, 4>, itk::Image<itk::Vector< float,5 >, 3>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterICVD23ICVD23', False, 'itk::Image<itk::CovariantVector<double, 2>, 3>, itk::Image<itk::CovariantVector<double, 2>, 3>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterICVD33ICVD33', False, 'itk::Image<itk::CovariantVector<double, 3>, 3>, itk::Image<itk::CovariantVector<double, 3>, 3>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterICVD43ICVD43', False, 'itk::Image<itk::CovariantVector<double, 4>, 3>, itk::Image<itk::CovariantVector<double, 4>, 3>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterICVD53ICVD53', False, 'itk::Image<itk::CovariantVector<double, 5>, 3>, itk::Image<itk::CovariantVector<double, 5>, 3>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterIF2IF1', False, 'itk::Image<float, 2>, itk::Image<float, 1>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterIF3VIF2', False, 'itk::Image<float, 3>, itk::VectorImage<float, 2>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterVIF2IF3', False, 'itk::VectorImage<float, 2>, itk::Image<float, 3>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterVIF3IF4', False, 'itk::VectorImage<float, 3>, itk::Image<float, 4>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterID2ID1', False, 'itk::Image<double, 2>, itk::Image<double, 1>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterID3VID2', False, 'itk::Image<double, 3>, itk::VectorImage<double, 2>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterVID2ID3', False, 'itk::VectorImage<double, 2>, itk::Image<double, 3>'),
('ImageToImageFilter', 'itk::ImageToImageFilter', 'itkImageToImageFilterVID3ID4', False, 'itk::VectorImage<double, 3>, itk::Image<double, 4>'),
('InPlaceImageFilter', 'itk::InPlaceImageFilter', 'itkInPlaceImageFilterIF1IF1', False, 'itk::Image<float, 1>, itk::Image<float, 1>'),
('InPlaceImageFilter', 'itk::InPlaceImageFilter', 'itkInPlaceImageFilterID1ID1', False, 'itk::Image<double, 1>, itk::Image<double, 1>'),
('InPlaceImageFilter', 'itk::InPlaceImageFilter', 'itkInPlaceImageFilterIUI2IF2', False, 'itk::Image<unsigned int, 2>, itk::Image<float, 2>'),
('InPlaceImageFilter', 'itk::InPlaceImageFilter', 'itkInPlaceImageFilterIUI3IF3', False, 'itk::Image<unsigned int, 3>, itk::Image<float, 3>'),
('InPlaceImageFilter', 'itk::InPlaceImageFilter', 'itkInPlaceImageFilterIUI2ID2', False, 'itk::Image<unsigned int, 2>, itk::Image<double, 2>'),
('InPlaceImageFilter', 'itk::InPlaceImageFilter', 'itkInPlaceImageFilterIUI3ID3', False, 'itk::Image<unsigned int, 3>, itk::Image<double, 3>'),
('InPlaceImageFilter', 'itk::InPlaceImageFilter', 'itkInPlaceImageFilterIF3ID2', False, 'itk::Image<float, 3>, itk::Image<double, 2>'),
('InPlaceImageFilter', 'itk::InPlaceImageFilter', 'itkInPlaceImageFilterIVF52IVF52', False, 'itk::Image<itk::Vector< float,5 >, 2>, itk::Image<itk::Vector< float,5 >, 2>'),
('InPlaceImageFilter', 'itk::InPlaceImageFilter', 'itkInPlaceImageFilterIVF53IVF53', False, 'itk::Image<itk::Vector< float,5 >, 3>, itk::Image<itk::Vector< float,5 >, 3>'),
('InPlaceImageFilter', 'itk::InPlaceImageFilter', 'itkInPlaceImageFilterIVF54IVF54', False, 'itk::Image<itk::Vector< float,5 >, 4>, itk::Image<itk::Vector< float,5 >, 4>'),
('InPlaceImageFilter', 'itk::InPlaceImageFilter', 'itkInPlaceImageFilterVIF3VIF3', False, 'itk::VectorImage<float, 3>, itk::VectorImage<float, 3>'),
('InPlaceImageFilter', 'itk::InPlaceImageFilter', 'itkInPlaceImageFilterVID3VID3', False, 'itk::VectorImage<double, 3>, itk::VectorImage<double, 3>'),
('ForwardProjectionImageFilter', 'rtk::ForwardProjectionImageFilter', 'rtkForwardProjectionImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('ForwardProjectionImageFilter', 'rtk::ForwardProjectionImageFilter', 'rtkForwardProjectionImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('ForwardProjectionImageFilter', 'rtk::ForwardProjectionImageFilter', 'rtkForwardProjectionImageFilterIVF23IVF23', False, 'itk::Image<itk::Vector< float,2 >,3>, itk::Image<itk::Vector< float,2 >,3>'),
('ForwardProjectionImageFilter', 'rtk::ForwardProjectionImageFilter', 'rtkForwardProjectionImageFilterIVF33IVF33', False, 'itk::Image<itk::Vector< float,3 >,3>, itk::Image<itk::Vector< float,3 >,3>'),
('ForwardProjectionImageFilter', 'rtk::ForwardProjectionImageFilter', 'rtkForwardProjectionImageFilterIVF43IVF43', False, 'itk::Image<itk::Vector< float,4 >,3>, itk::Image<itk::Vector< float,4 >,3>'),
('ForwardProjectionImageFilter', 'rtk::ForwardProjectionImageFilter', 'rtkForwardProjectionImageFilterIVF53IVF53', False, 'itk::Image<itk::Vector< float,5 >,3>, itk::Image<itk::Vector< float,5 >,3>'),
('PyBuffer', 'itk::PyBuffer', 'itkPyBufferIVF52', False, 'itk::Image<itk::Vector< float,5 >,2>'),
('PyBuffer', 'itk::PyBuffer', 'itkPyBufferICVF52', False, 'itk::Image<itk::CovariantVector< float,5 >,2>'),
('PyBuffer', 'itk::PyBuffer', 'itkPyBufferIVF53', False, 'itk::Image<itk::Vector< float,5 >,3>'),
('PyBuffer', 'itk::PyBuffer', 'itkPyBufferICVF53', False, 'itk::Image<itk::CovariantVector< float,5 >,3>'),
('PyBuffer', 'itk::PyBuffer', 'itkPyBufferIVF54', False, 'itk::Image<itk::Vector< float,5 >,4>'),
('PyBuffer', 'itk::PyBuffer', 'itkPyBufferICVF54', False, 'itk::Image<itk::CovariantVector< float,5 >,4>'),
('PyBuffer', 'itk::PyBuffer', 'itkPyBufferID1', False, 'itk::Image<double, 1>'),
('PyBuffer', 'itk::PyBuffer', 'itkPyBufferIUC1', False, 'itk::Image<unsigned char, 1>'),
('PyBuffer', 'itk::PyBuffer', 'itkPyBufferIUS1', False, 'itk::Image<unsigned short, 1>'),
('PyBuffer', 'itk::PyBuffer', 'itkPyBufferIF1', False, 'itk::Image<float, 1>'),
('ImageDuplicator', 'itk::ImageDuplicator', 'itkImageDuplicatorID1', False, 'itk::Image<double, 1>'),
('ImageDuplicator', 'itk::ImageDuplicator', 'itkImageDuplicatorIUC1', False, 'itk::Image<unsigned char, 1>'),
('ImageDuplicator', 'itk::ImageDuplicator', 'itkImageDuplicatorIUS1', False, 'itk::Image<unsigned short, 1>'),
('ImageDuplicator', 'itk::ImageDuplicator', 'itkImageDuplicatorIF1', False, 'itk::Image<float, 1>'),
('ImageFileReader', 'itk::ImageFileReader', 'itkImageFileReaderIVF52', False, 'itk::Image<itk::Vector< float,5 >, 2>'),
('ImageFileReader', 'itk::ImageFileReader', 'itkImageFileReaderIVF53', False, 'itk::Image<itk::Vector< float,5 >, 3>'),
('ImageFileReader', 'itk::ImageFileReader', 'itkImageFileReaderIVF54', False, 'itk::Image<itk::Vector< float,5 >, 4>'),
('ImageFileReader', 'itk::ImageFileReader', 'itkImageFileReaderID1', False, 'itk::Image<double, 1>'),
('ImageFileReader', 'itk::ImageFileReader', 'itkImageFileReaderIUC1', False, 'itk::Image<unsigned char, 1>'),
('ImageFileReader', 'itk::ImageFileReader', 'itkImageFileReaderIUS1', False, 'itk::Image<unsigned short, 1>'),
('ImageFileReader', 'itk::ImageFileReader', 'itkImageFileReaderIF1', False, 'itk::Image<float, 1>'),
('ImageFileWriter', 'itk::ImageFileWriter', 'itkImageFileWriterIVF52', False, 'itk::Image<itk::Vector< float,5 >, 2>'),
('ImageFileWriter', 'itk::ImageFileWriter', 'itkImageFileWriterIVF53', False, 'itk::Image<itk::Vector< float,5 >, 3>'),
('ImageFileWriter', 'itk::ImageFileWriter', 'itkImageFileWriterIVF54', False, 'itk::Image<itk::Vector< float,5 >, 4>'),
('ImageFileWriter', 'itk::ImageFileWriter', 'itkImageFileWriterID1', False, 'itk::Image<double, 1>'),
('ImageFileWriter', 'itk::ImageFileWriter', 'itkImageFileWriterIUC1', False, 'itk::Image<unsigned char, 1>'),
('ImageFileWriter', 'itk::ImageFileWriter', 'itkImageFileWriterIUS1', False, 'itk::Image<unsigned short, 1>'),
('ImageFileWriter', 'itk::ImageFileWriter', 'itkImageFileWriterIF1', False, 'itk::Image<float, 1>'),
('ImageRegion', 'itk::ImageRegion', 'itkImageRegion1', False, '1'),
('vector', 'std::vector', 'vectoritkImageRegion1', False, 'itk::ImageRegion< 1 >'),
('WarpImageFilter', 'itk::WarpImageFilter', 'itkWarpImageFilterIF2IF2ICVF22', False, 'itk::Image<float, 2>, itk::Image<float, 2>, itk::Image< itk::CovariantVector<float, 2>, 2>'),
('WarpImageFilter', 'itk::WarpImageFilter', 'itkWarpImageFilterID2ID2ICVD22', False, 'itk::Image<double, 2>, itk::Image<double, 2>, itk::Image< itk::CovariantVector<double, 2>, 2>'),
('WarpImageFilter', 'itk::WarpImageFilter', 'itkWarpImageFilterIF3IF3ICVF33', False, 'itk::Image<float, 3>, itk::Image<float, 3>, itk::Image< itk::CovariantVector<float, 3>, 3>'),
('WarpImageFilter', 'itk::WarpImageFilter', 'itkWarpImageFilterID3ID3ICVD33', False, 'itk::Image<double, 3>, itk::Image<double, 3>, itk::Image< itk::CovariantVector<double, 3>, 3>'),
('WarpImageFilter', 'itk::WarpImageFilter', 'itkWarpImageFilterIF4IF4ICVF44', False, 'itk::Image<float, 4>, itk::Image<float, 4>, itk::Image< itk::CovariantVector<float, 4>, 4>'),
('WarpImageFilter', 'itk::WarpImageFilter', 'itkWarpImageFilterID4ID4ICVD44', False, 'itk::Image<double, 4>, itk::Image<double, 4>, itk::Image< itk::CovariantVector<double, 4>, 4>'),
('ADMMTotalVariationConeBeamReconstructionFilter', 'rtk::ADMMTotalVariationConeBeamReconstructionFilter', 'rtkADMMTotalVariationConeBeamReconstructionFilterIF3', False, 'itk::Image< float,3 >'),
('ADMMTotalVariationConeBeamReconstructionFilter', 'rtk::ADMMTotalVariationConeBeamReconstructionFilter', 'rtkADMMTotalVariationConeBeamReconstructionFilterID3', False, 'itk::Image< double,3 >'),
('ADMMWaveletsConeBeamReconstructionFilter', 'rtk::ADMMWaveletsConeBeamReconstructionFilter', 'rtkADMMWaveletsConeBeamReconstructionFilterIF3', False, 'itk::Image< float,3 >'),
('ADMMWaveletsConeBeamReconstructionFilter', 'rtk::ADMMWaveletsConeBeamReconstructionFilter', 'rtkADMMWaveletsConeBeamReconstructionFilterID3', False, 'itk::Image< double,3 >'),
('AmsterdamShroudImageFilter', 'rtk::AmsterdamShroudImageFilter', 'rtkAmsterdamShroudImageFilterIF3', False, 'itk::Image< float,3 >'),
('AmsterdamShroudImageFilter', 'rtk::AmsterdamShroudImageFilter', 'rtkAmsterdamShroudImageFilterID3', False, 'itk::Image< double,3 >'),
('BackProjectionImageFilter', 'rtk::BackProjectionImageFilter', 'rtkBackProjectionImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('BackProjectionImageFilter', 'rtk::BackProjectionImageFilter', 'rtkBackProjectionImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('BioscanGeometryReader', 'rtk::BioscanGeometryReader', 'rtkBioscanGeometryReader', False),
('BoellaardScatterCorrectionImageFilter', 'rtk::BoellaardScatterCorrectionImageFilter', 'rtkBoellaardScatterCorrectionImageFilterIF3', False, 'itk::Image< float,3 >'),
('BoellaardScatterCorrectionImageFilter', 'rtk::BoellaardScatterCorrectionImageFilter', 'rtkBoellaardScatterCorrectionImageFilterID3', False, 'itk::Image< double,3 >'),
('BoxShape', 'rtk::BoxShape', 'rtkBoxShape', False),
('ConditionalMedianImageFilter', 'rtk::ConditionalMedianImageFilter', 'rtkConditionalMedianImageFilterIF2', False, 'itk::Image< float,2 >'),
('ConditionalMedianImageFilter', 'rtk::ConditionalMedianImageFilter', 'rtkConditionalMedianImageFilterIF3', False, 'itk::Image< float,3 >'),
('ConditionalMedianImageFilter', 'rtk::ConditionalMedianImageFilter', 'rtkConditionalMedianImageFilterIF4', False, 'itk::Image< float,4 >'),
('ConditionalMedianImageFilter', 'rtk::ConditionalMedianImageFilter', 'rtkConditionalMedianImageFilterID2', False, 'itk::Image< double,2 >'),
('ConditionalMedianImageFilter', 'rtk::ConditionalMedianImageFilter', 'rtkConditionalMedianImageFilterID3', False, 'itk::Image< double,3 >'),
('ConditionalMedianImageFilter', 'rtk::ConditionalMedianImageFilter', 'rtkConditionalMedianImageFilterID4', False, 'itk::Image< double,4 >'),
('ConjugateGradientConeBeamReconstructionFilter', 'rtk::ConjugateGradientConeBeamReconstructionFilter', 'rtkConjugateGradientConeBeamReconstructionFilterIF3', False, 'itk::Image< float,3 >'),
('ConjugateGradientConeBeamReconstructionFilter', 'rtk::ConjugateGradientConeBeamReconstructionFilter', 'rtkConjugateGradientConeBeamReconstructionFilterID3', False, 'itk::Image< double,3 >'),
('ConstantImageSource', 'rtk::ConstantImageSource', 'rtkConstantImageSourceIF2', False, 'itk::Image< float,2 >'),
('ConstantImageSource', 'rtk::ConstantImageSource', 'rtkConstantImageSourceIF3', False, 'itk::Image< float,3 >'),
('ConstantImageSource', 'rtk::ConstantImageSource', 'rtkConstantImageSourceIF4', False, 'itk::Image< float,4 >'),
('ConstantImageSource', 'rtk::ConstantImageSource', 'rtkConstantImageSourceID2', False, 'itk::Image< double,2 >'),
('ConstantImageSource', 'rtk::ConstantImageSource', 'rtkConstantImageSourceID3', False, 'itk::Image< double,3 >'),
('ConstantImageSource', 'rtk::ConstantImageSource', 'rtkConstantImageSourceID4', False, 'itk::Image< double,4 >'),
('ConstantImageSource', 'rtk::ConstantImageSource', 'rtkConstantImageSourceIVF22', False, 'itk::Image< itk::Vector< float,2 >,2 >'),
('ConstantImageSource', 'rtk::ConstantImageSource', 'rtkConstantImageSourceIVF23', False, 'itk::Image< itk::Vector< float,2 >,3 >'),
('ConstantImageSource', 'rtk::ConstantImageSource', 'rtkConstantImageSourceIVF24', False, 'itk::Image< itk::Vector< float,2 >,4 >'),
('ConstantImageSource', 'rtk::ConstantImageSource', 'rtkConstantImageSourceIVF32', False, 'itk::Image< itk::Vector< float,3 >,2 >'),
('ConstantImageSource', 'rtk::ConstantImageSource', 'rtkConstantImageSourceIVF33', False, 'itk::Image< itk::Vector< float,3 >,3 >'),
('ConstantImageSource', 'rtk::ConstantImageSource', 'rtkConstantImageSourceIVF34', False, 'itk::Image< itk::Vector< float,3 >,4 >'),
('ConstantImageSource', 'rtk::ConstantImageSource', 'rtkConstantImageSourceIVF42', False, 'itk::Image< itk::Vector< float,4 >,2 >'),
('ConstantImageSource', 'rtk::ConstantImageSource', 'rtkConstantImageSourceIVF43', False, 'itk::Image< itk::Vector< float,4 >,3 >'),
('ConstantImageSource', 'rtk::ConstantImageSource', 'rtkConstantImageSourceIVF44', False, 'itk::Image< itk::Vector< float,4 >,4 >'),
('ConvexShape', 'rtk::ConvexShape', 'rtkConvexShape', False),
('CyclicDeformationImageFilter', 'rtk::CyclicDeformationImageFilter', 'rtkCyclicDeformationImageFilterIF4IF3', False, 'itk::Image<float, 4>, itk::Image<float, 3>'),
('CyclicDeformationImageFilter', 'rtk::CyclicDeformationImageFilter', 'rtkCyclicDeformationImageFilterIVF24IVF23', False, 'itk::Image<itk::Vector< float,2 >, 4>, itk::Image<itk::Vector< float,2 >, 3>'),
('CyclicDeformationImageFilter', 'rtk::CyclicDeformationImageFilter', 'rtkCyclicDeformationImageFilterIVF34IVF33', False, 'itk::Image<itk::Vector< float,3 >, 4>, itk::Image<itk::Vector< float,3 >, 3>'),
('CyclicDeformationImageFilter', 'rtk::CyclicDeformationImageFilter', 'rtkCyclicDeformationImageFilterIVF44IVF43', False, 'itk::Image<itk::Vector< float,4 >, 4>, itk::Image<itk::Vector< float,4 >, 3>'),
('CyclicDeformationImageFilter', 'rtk::CyclicDeformationImageFilter', 'rtkCyclicDeformationImageFilterIVF54IVF53', False, 'itk::Image<itk::Vector< float,5 >, 4>, itk::Image<itk::Vector< float,5 >, 3>'),
('DCMImagXImageIO', 'rtk::DCMImagXImageIO', 'rtkDCMImagXImageIO', False),
('DCMImagXImageIOFactory', 'rtk::DCMImagXImageIOFactory', 'rtkDCMImagXImageIOFactory', False),
('DaubechiesWaveletsDenoiseSequenceImageFilter', 'rtk::DaubechiesWaveletsDenoiseSequenceImageFilter', 'rtkDaubechiesWaveletsDenoiseSequenceImageFilterIF4', False, 'itk::Image< float,4 >'),
('DaubechiesWaveletsDenoiseSequenceImageFilter', 'rtk::DaubechiesWaveletsDenoiseSequenceImageFilter', 'rtkDaubechiesWaveletsDenoiseSequenceImageFilterID4', False, 'itk::Image< double,4 >'),
('DeconstructSoftThresholdReconstructImageFilter', 'rtk::DeconstructSoftThresholdReconstructImageFilter', 'rtkDeconstructSoftThresholdReconstructImageFilterIF2', False, 'itk::Image< float,2 >'),
('DeconstructSoftThresholdReconstructImageFilter', 'rtk::DeconstructSoftThresholdReconstructImageFilter', 'rtkDeconstructSoftThresholdReconstructImageFilterIF3', False, 'itk::Image< float,3 >'),
('DeconstructSoftThresholdReconstructImageFilter', 'rtk::DeconstructSoftThresholdReconstructImageFilter', 'rtkDeconstructSoftThresholdReconstructImageFilterIF4', False, 'itk::Image< float,4 >'),
('DeconstructSoftThresholdReconstructImageFilter', 'rtk::DeconstructSoftThresholdReconstructImageFilter', 'rtkDeconstructSoftThresholdReconstructImageFilterID2', False, 'itk::Image< double,2 >'),
('DeconstructSoftThresholdReconstructImageFilter', 'rtk::DeconstructSoftThresholdReconstructImageFilter', 'rtkDeconstructSoftThresholdReconstructImageFilterID3', False, 'itk::Image< double,3 >'),
('DeconstructSoftThresholdReconstructImageFilter', 'rtk::DeconstructSoftThresholdReconstructImageFilter', 'rtkDeconstructSoftThresholdReconstructImageFilterID4', False, 'itk::Image< double,4 >'),
('DenoisingBPDQImageFilter', 'rtk::DenoisingBPDQImageFilter', 'rtkDenoisingBPDQImageFilterIF2ICVF22', False, 'itk::Image<float, 2>, itk::Image<itk::CovariantVector< float,2 >, 2>'),
('DenoisingBPDQImageFilter', 'rtk::DenoisingBPDQImageFilter', 'rtkDenoisingBPDQImageFilterID2ICVF22', False, 'itk::Image<double, 2>, itk::Image<itk::CovariantVector< float,2 >, 2>'),
('DenoisingBPDQImageFilter', 'rtk::DenoisingBPDQImageFilter', 'rtkDenoisingBPDQImageFilterIF3ICVF33', False, 'itk::Image<float, 3>, itk::Image<itk::CovariantVector< float,3 >, 3>'),
('DenoisingBPDQImageFilter', 'rtk::DenoisingBPDQImageFilter', 'rtkDenoisingBPDQImageFilterID3ICVF33', False, 'itk::Image<double, 3>, itk::Image<itk::CovariantVector< float,3 >, 3>'),
('DenoisingBPDQImageFilter', 'rtk::DenoisingBPDQImageFilter', 'rtkDenoisingBPDQImageFilterIF4ICVF44', False, 'itk::Image<float, 4>, itk::Image<itk::CovariantVector< float,4 >, 4>'),
('DenoisingBPDQImageFilter', 'rtk::DenoisingBPDQImageFilter', 'rtkDenoisingBPDQImageFilterID4ICVF44', False, 'itk::Image<double, 4>, itk::Image<itk::CovariantVector< float,4 >, 4>'),
('DigisensGeometryReader', 'rtk::DigisensGeometryReader', 'rtkDigisensGeometryReader', False),
('DisplacedDetectorForOffsetFieldOfViewImageFilter', 'rtk::DisplacedDetectorForOffsetFieldOfViewImageFilter', 'rtkDisplacedDetectorForOffsetFieldOfViewImageFilterIF3', False, 'itk::Image< float,3 >'),
('DisplacedDetectorForOffsetFieldOfViewImageFilter', 'rtk::DisplacedDetectorForOffsetFieldOfViewImageFilter', 'rtkDisplacedDetectorForOffsetFieldOfViewImageFilterID3', False, 'itk::Image< double,3 >'),
('DisplacedDetectorImageFilter', 'rtk::DisplacedDetectorImageFilter', 'rtkDisplacedDetectorImageFilterIF3', False, 'itk::Image< float,3 >'),
('DisplacedDetectorImageFilter', 'rtk::DisplacedDetectorImageFilter', 'rtkDisplacedDetectorImageFilterIF4', False, 'itk::Image< float,4 >'),
('DisplacedDetectorImageFilter', 'rtk::DisplacedDetectorImageFilter', 'rtkDisplacedDetectorImageFilterID3', False, 'itk::Image< double,3 >'),
('DisplacedDetectorImageFilter', 'rtk::DisplacedDetectorImageFilter', 'rtkDisplacedDetectorImageFilterID4', False, 'itk::Image< double,4 >'),
('DrawConvexImageFilter', 'rtk::DrawConvexImageFilter', 'rtkDrawConvexImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('DrawConvexImageFilter', 'rtk::DrawConvexImageFilter', 'rtkDrawConvexImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('DrawEllipsoidImageFilter', 'rtk::DrawEllipsoidImageFilter', 'rtkDrawEllipsoidImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('DrawEllipsoidImageFilter', 'rtk::DrawEllipsoidImageFilter', 'rtkDrawEllipsoidImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('DrawGeometricPhantomImageFilter', 'rtk::DrawGeometricPhantomImageFilter', 'rtkDrawGeometricPhantomImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('DrawGeometricPhantomImageFilter', 'rtk::DrawGeometricPhantomImageFilter', 'rtkDrawGeometricPhantomImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('DrawQuadricImageFilter', 'rtk::DrawQuadricImageFilter', 'rtkDrawQuadricImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('DrawQuadricImageFilter', 'rtk::DrawQuadricImageFilter', 'rtkDrawQuadricImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('DrawSheppLoganFilter', 'rtk::DrawSheppLoganFilter', 'rtkDrawSheppLoganFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('DrawSheppLoganFilter', 'rtk::DrawSheppLoganFilter', 'rtkDrawSheppLoganFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('EdfImageIO', 'rtk::EdfImageIO', 'rtkEdfImageIO', False),
('EdfImageIOFactory', 'rtk::EdfImageIOFactory', 'rtkEdfImageIOFactory', False),
('EdfRawToAttenuationImageFilter', 'rtk::EdfRawToAttenuationImageFilter', 'rtkEdfRawToAttenuationImageFilterIF2IF2', False, 'itk::Image< float,2 >, itk::Image< float,2 >'),
('EdfRawToAttenuationImageFilter', 'rtk::EdfRawToAttenuationImageFilter', 'rtkEdfRawToAttenuationImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('EdfRawToAttenuationImageFilter', 'rtk::EdfRawToAttenuationImageFilter', 'rtkEdfRawToAttenuationImageFilterIF4IF4', False, 'itk::Image< float,4 >, itk::Image< float,4 >'),
('EdfRawToAttenuationImageFilter', 'rtk::EdfRawToAttenuationImageFilter', 'rtkEdfRawToAttenuationImageFilterID2ID2', False, 'itk::Image< double,2 >, itk::Image< double,2 >'),
('EdfRawToAttenuationImageFilter', 'rtk::EdfRawToAttenuationImageFilter', 'rtkEdfRawToAttenuationImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('EdfRawToAttenuationImageFilter', 'rtk::EdfRawToAttenuationImageFilter', 'rtkEdfRawToAttenuationImageFilterID4ID4', False, 'itk::Image< double,4 >, itk::Image< double,4 >'),
('ElektaSynergyGeometryReader', 'rtk::ElektaSynergyGeometryReader', 'rtkElektaSynergyGeometryReader', False),
('ElektaSynergyRawLookupTableImageFilter', 'rtk::ElektaSynergyRawLookupTableImageFilter', 'rtkElektaSynergyRawLookupTableImageFilterIUS2IUS2', False, 'itk::Image<unsigned short, 2>, itk::Image<unsigned short, 2>'),
('ElektaSynergyRawLookupTableImageFilter', 'rtk::ElektaSynergyRawLookupTableImageFilter', 'rtkElektaSynergyRawLookupTableImageFilterIUS3IUS3', False, 'itk::Image<unsigned short, 3>, itk::Image<unsigned short, 3>'),
('ElektaXVI5GeometryXMLFileReader', 'rtk::ElektaXVI5GeometryXMLFileReader', 'rtkElektaXVI5GeometryXMLFileReader', False),
('ExtractPhaseImageFilter', 'rtk::ExtractPhaseImageFilter', 'rtkExtractPhaseImageFilterIF1', False, 'itk::Image<float, 1>'),
('ExtractPhaseImageFilter', 'rtk::ExtractPhaseImageFilter', 'rtkExtractPhaseImageFilterID1', False, 'itk::Image<double, 1>'),
('FDKBackProjectionImageFilter', 'rtk::FDKBackProjectionImageFilter', 'rtkFDKBackProjectionImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('FDKBackProjectionImageFilter', 'rtk::FDKBackProjectionImageFilter', 'rtkFDKBackProjectionImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('FDKConeBeamReconstructionFilter', 'rtk::FDKConeBeamReconstructionFilter', 'rtkFDKConeBeamReconstructionFilterIF3', False, 'itk::Image<float, 3>'),
('FDKConeBeamReconstructionFilter', 'rtk::FDKConeBeamReconstructionFilter', 'rtkFDKConeBeamReconstructionFilterID3', False, 'itk::Image<double, 3>'),
('FDKWarpBackProjectionImageFilter', 'rtk::FDKWarpBackProjectionImageFilter', 'rtkFDKWarpBackProjectionImageFilterIF3IF3CDFIVF34IVF33', False, 'itk::Image<float, 3>, itk::Image<float, 3>, rtk::CyclicDeformationImageFilter< itk::Image<itk::Vector< float,3 >, 4>, itk::Image<itk::Vector< float,3 >, 3> >'),
('FDKWeightProjectionFilter', 'rtk::FDKWeightProjectionFilter', 'rtkFDKWeightProjectionFilterIF2', False, 'itk::Image< float,2 >'),
('FDKWeightProjectionFilter', 'rtk::FDKWeightProjectionFilter', 'rtkFDKWeightProjectionFilterIF3', False, 'itk::Image< float,3 >'),
('FDKWeightProjectionFilter', 'rtk::FDKWeightProjectionFilter', 'rtkFDKWeightProjectionFilterIF4', False, 'itk::Image< float,4 >'),
('FDKWeightProjectionFilter', 'rtk::FDKWeightProjectionFilter', 'rtkFDKWeightProjectionFilterID2', False, 'itk::Image< double,2 >'),
('FDKWeightProjectionFilter', 'rtk::FDKWeightProjectionFilter', 'rtkFDKWeightProjectionFilterID3', False, 'itk::Image< double,3 >'),
('FDKWeightProjectionFilter', 'rtk::FDKWeightProjectionFilter', 'rtkFDKWeightProjectionFilterID4', False, 'itk::Image< double,4 >'),
('FFTProjectionsConvolutionImageFilter', 'rtk::FFTProjectionsConvolutionImageFilter', 'rtkFFTProjectionsConvolutionImageFilterIF3IF3D', False, 'itk::Image<float, 3>, itk::Image<float, 3>, double'),
('FFTProjectionsConvolutionImageFilter', 'rtk::FFTProjectionsConvolutionImageFilter', 'rtkFFTProjectionsConvolutionImageFilterIF3IF3F', False, 'itk::Image<float, 3>, itk::Image<float, 3>, float'),
('FFTProjectionsConvolutionImageFilter', 'rtk::FFTProjectionsConvolutionImageFilter', 'rtkFFTProjectionsConvolutionImageFilterID3ID3D', False, 'itk::Image<double, 3>, itk::Image<double, 3>, double'),
('FFTProjectionsConvolutionImageFilter', 'rtk::FFTProjectionsConvolutionImageFilter', 'rtkFFTProjectionsConvolutionImageFilterID3ID3F', False, 'itk::Image<double, 3>, itk::Image<double, 3>, float'),
('FFTRampImageFilter', 'rtk::FFTRampImageFilter', 'rtkFFTRampImageFilterIF3IF3D', False, 'itk::Image<float, 3>, itk::Image<float, 3>, double'),
('FFTRampImageFilter', 'rtk::FFTRampImageFilter', 'rtkFFTRampImageFilterID3ID3D', False, 'itk::Image<double, 3>, itk::Image<double, 3>, double'),
('FieldOfViewImageFilter', 'rtk::FieldOfViewImageFilter', 'rtkFieldOfViewImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('FieldOfViewImageFilter', 'rtk::FieldOfViewImageFilter', 'rtkFieldOfViewImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('ForwardWarpImageFilter', 'rtk::ForwardWarpImageFilter', 'rtkForwardWarpImageFilterIF2IF2ICVF22', False, 'itk::Image<float, 2>, itk::Image<float, 2>, itk::Image< itk::CovariantVector<float, 2>, 2>'),
('ForwardWarpImageFilter', 'rtk::ForwardWarpImageFilter', 'rtkForwardWarpImageFilterIF3IF3ICVF33', False, 'itk::Image<float, 3>, itk::Image<float, 3>, itk::Image< itk::CovariantVector<float, 3>, 3>'),
('ForwardWarpImageFilter', 'rtk::ForwardWarpImageFilter', 'rtkForwardWarpImageFilterIF4IF4ICVF44', False, 'itk::Image<float, 4>, itk::Image<float, 4>, itk::Image< itk::CovariantVector<float, 4>, 4>'),
('FourDConjugateGradientConeBeamReconstructionFilter', 'rtk::FourDConjugateGradientConeBeamReconstructionFilter', 'rtkFourDConjugateGradientConeBeamReconstructionFilterIF4IF3', False, 'itk::Image<float, 4>, itk::Image<float, 3>'),
('FourDConjugateGradientConeBeamReconstructionFilter', 'rtk::FourDConjugateGradientConeBeamReconstructionFilter', 'rtkFourDConjugateGradientConeBeamReconstructionFilterID4ID3', False, 'itk::Image<double, 4>, itk::Image<double, 3>'),
('FourDSARTConeBeamReconstructionFilter', 'rtk::FourDSARTConeBeamReconstructionFilter', 'rtkFourDSARTConeBeamReconstructionFilterIF4IF3', False, 'itk::Image<float, 4>, itk::Image<float, 3>'),
('FourDSARTConeBeamReconstructionFilter', 'rtk::FourDSARTConeBeamReconstructionFilter', 'rtkFourDSARTConeBeamReconstructionFilterID4ID3', False, 'itk::Image<double, 4>, itk::Image<double, 3>'),
('FourDToProjectionStackImageFilter', 'rtk::FourDToProjectionStackImageFilter', 'rtkFourDToProjectionStackImageFilterIF3IF4', False, 'itk::Image<float, 3>, itk::Image<float, 4>'),
('FourDToProjectionStackImageFilter', 'rtk::FourDToProjectionStackImageFilter', 'rtkFourDToProjectionStackImageFilterID3ID4', False, 'itk::Image<double, 3>, itk::Image<double, 4>'),
('GeometricPhantom', 'rtk::GeometricPhantom', 'rtkGeometricPhantom', False),
('GeometricPhantomFileReader', 'rtk::GeometricPhantomFileReader', 'rtkGeometricPhantomFileReader', False),
('GlobalResourceProbe', 'rtk::GlobalResourceProbe', 'rtkGlobalResourceProbe', False),
('HisImageIO', 'rtk::HisImageIO', 'rtkHisImageIO', False),
('HisImageIOFactory', 'rtk::HisImageIOFactory', 'rtkHisImageIOFactory', False),
('HncImageIO', 'rtk::HncImageIO', 'rtkHncImageIO', False),
('HncImageIOFactory', 'rtk::HncImageIOFactory', 'rtkHncImageIOFactory', False),
('HndImageIO', 'rtk::HndImageIO', 'rtkHndImageIO', False),
('HndImageIOFactory', 'rtk::HndImageIOFactory', 'rtkHndImageIOFactory', False),
('I0EstimationProjectionFilter', 'rtk::I0EstimationProjectionFilter', 'rtkI0EstimationProjectionFilterIUS3IUS32', False, 'itk::Image<unsigned short, 3>, itk::Image<unsigned short, 3>, 2'),
('ImagXImageIO', 'rtk::ImagXImageIO', 'rtkImagXImageIO', False),
('ImagXImageIOFactory', 'rtk::ImagXImageIOFactory', 'rtkImagXImageIOFactory', False),
('ImageToVectorImageFilter', 'rtk::ImageToVectorImageFilter', 'rtkImageToVectorImageFilterIF2VIF2', False, 'itk::Image<float, 2>, itk::VectorImage<float, 2>'),
('ImageToVectorImageFilter', 'rtk::ImageToVectorImageFilter', 'rtkImageToVectorImageFilterIF3VIF2', False, 'itk::Image<float, 3>, itk::VectorImage<float, 2>'),
('ImageToVectorImageFilter', 'rtk::ImageToVectorImageFilter', 'rtkImageToVectorImageFilterID2VID2', False, 'itk::Image<double, 2>, itk::VectorImage<double, 2>'),
('ImageToVectorImageFilter', 'rtk::ImageToVectorImageFilter', 'rtkImageToVectorImageFilterID3VID2', False, 'itk::Image<double, 3>, itk::VectorImage<double, 2>'),
('IntersectionOfConvexShapes', 'rtk::IntersectionOfConvexShapes', 'rtkIntersectionOfConvexShapes', False),
('IterativeConeBeamReconstructionFilter', 'rtk::IterativeConeBeamReconstructionFilter', 'rtkIterativeConeBeamReconstructionFilterIF3', False, 'itk::Image< float,3 >'),
('IterativeConeBeamReconstructionFilter', 'rtk::IterativeConeBeamReconstructionFilter', 'rtkIterativeConeBeamReconstructionFilterID3', False, 'itk::Image< double,3 >'),
('IterativeConeBeamReconstructionFilter', 'rtk::IterativeConeBeamReconstructionFilter', 'rtkIterativeConeBeamReconstructionFilterIF4IF3', False, 'itk::Image<float, 4>, itk::Image<float, 3>'),
('IterativeConeBeamReconstructionFilter', 'rtk::IterativeConeBeamReconstructionFilter', 'rtkIterativeConeBeamReconstructionFilterID4ID3', False, 'itk::Image<double, 4>, itk::Image<double, 3>'),
('IterativeConeBeamReconstructionFilter', 'rtk::IterativeConeBeamReconstructionFilter', 'rtkIterativeConeBeamReconstructionFilterIVF23IVF23', False, 'itk::Image<itk::Vector< float,2 >, 3>, itk::Image<itk::Vector< float,2 >, 3>'),
('IterativeConeBeamReconstructionFilter', 'rtk::IterativeConeBeamReconstructionFilter', 'rtkIterativeConeBeamReconstructionFilterIVF33IVF33', False, 'itk::Image<itk::Vector< float,3 >, 3>, itk::Image<itk::Vector< float,3 >, 3>'),
('IterativeConeBeamReconstructionFilter', 'rtk::IterativeConeBeamReconstructionFilter', 'rtkIterativeConeBeamReconstructionFilterIVF43IVF43', False, 'itk::Image<itk::Vector< float,4 >, 3>, itk::Image<itk::Vector< float,4 >, 3>'),
('IterativeConeBeamReconstructionFilter', 'rtk::IterativeConeBeamReconstructionFilter', 'rtkIterativeConeBeamReconstructionFilterIVF53IVF53', False, 'itk::Image<itk::Vector< float,5 >, 3>, itk::Image<itk::Vector< float,5 >, 3>'),
('IterativeFDKConeBeamReconstructionFilter', 'rtk::IterativeFDKConeBeamReconstructionFilter', 'rtkIterativeFDKConeBeamReconstructionFilterIF3IF3D', False, 'itk::Image<float, 3>, itk::Image<float, 3>, double'),
('IterativeFDKConeBeamReconstructionFilter', 'rtk::IterativeFDKConeBeamReconstructionFilter', 'rtkIterativeFDKConeBeamReconstructionFilterID3ID3D', False, 'itk::Image<double, 3>, itk::Image<double, 3>, double'),
('InterpolationWeightMultiplicationAttenuatedBackProjection', 'rtk::Functor::InterpolationWeightMultiplicationAttenuatedBackProjection', 'rtkFunctorInterpolationWeightMultiplicationAttenuatedBackProjectionFFF', False, 'float, float, float'),
('InterpolationWeightMultiplicationAttenuatedBackProjection', 'rtk::Functor::InterpolationWeightMultiplicationAttenuatedBackProjection', 'rtkFunctorInterpolationWeightMultiplicationAttenuatedBackProjectionDDD', False, 'double, double, double'),
('SplatWeightMultiplicationAttenuated', 'rtk::Functor::SplatWeightMultiplicationAttenuated', 'rtkFunctorSplatWeightMultiplicationFDF', False, 'float, double, float'),
('SplatWeightMultiplicationAttenuated', 'rtk::Functor::SplatWeightMultiplicationAttenuated', 'rtkFunctorSplatWeightMultiplicationDDD', False, 'double, double, double'),
('ComputeAttenuationCorrectionBackProjection', 'rtk::Functor::ComputeAttenuationCorrectionBackProjection', 'rtkComputeAttenuationCorrectionBackProjectionFF', False, 'float, float'),
('ComputeAttenuationCorrectionBackProjection', 'rtk::Functor::ComputeAttenuationCorrectionBackProjection', 'rtkComputeAttenuationCorrectionBackProjectionDD', False, 'double, double'),
('JosephBackProjectionImageFilter', 'rtk::JosephBackProjectionImageFilter', 'rtkJosephBackProjectionImageFilterIF3IF3SWMFDFIS', False, 'itk::Image<float, 3>, itk::Image< float, 3>, rtk::Functor::InterpolationWeightMultiplicationAttenuatedBackProjection<float, float, float>, rtk::Functor::SplatWeightMultiplicationAttenuated<float, double, float>, rtk::Functor::ComputeAttenuationCorrectionBackProjection<float, float>'),
('JosephBackProjectionImageFilter', 'rtk::JosephBackProjectionImageFilter', 'rtkJosephBackProjectionImageFilterID3ID3SWMDDDIS', False, 'itk::Image<double, 3>, itk::Image< double, 3>, rtk::Functor::InterpolationWeightMultiplicationAttenuatedBackProjection<double, double, double>, rtk::Functor::SplatWeightMultiplicationAttenuated<double, double, double>, rtk::Functor::ComputeAttenuationCorrectionBackProjection<double, double>'),
('JosephBackAttenuatedProjectionImageFilter', 'rtk::JosephBackAttenuatedProjectionImageFilter', 'rtkJosephBackAttenuatedProjectionImageFilterIF3IF3SWMFDF', False, 'itk::Image<float, 3>, itk::Image< float, 3>'),
('JosephBackAttenuatedProjectionImageFilter', 'rtk::JosephBackAttenuatedProjectionImageFilter', 'rtkJosephBackAttenuatedProjectionImageFilterID3ID3SWMDDD', False, 'itk::Image<double, 3>, itk::Image< double, 3>'),
('SplatWeightMultiplication', 'rtk::Functor::SplatWeightMultiplication', 'rtkFunctorSplatWeightMultiplicationFDF', False, 'float, double, float'),
('SplatWeightMultiplication', 'rtk::Functor::SplatWeightMultiplication', 'rtkFunctorSplatWeightMultiplicationDDD', False, 'double, double, double'),
('JosephBackProjectionImageFilter', 'rtk::JosephBackProjectionImageFilter', 'rtkJosephBackProjectionImageFilterIF3IF3SWMFDF', False, 'itk::Image<float, 3>, itk::Image< float, 3>'),
('JosephBackProjectionImageFilter', 'rtk::JosephBackProjectionImageFilter', 'rtkJosephBackProjectionImageFilterID3ID3SWMDDD', False, 'itk::Image<double, 3>, itk::Image< double, 3>'),
('InterpolationWeightMultiplicationAttenuated', 'rtk::Functor::InterpolationWeightMultiplicationAttenuated', 'rtkFunctorInterpolationWeightMultiplicationAttenuatedBackProjectionFFF', False, 'float, float, float'),
('InterpolationWeightMultiplicationAttenuated', 'rtk::Functor::InterpolationWeightMultiplicationAttenuated', 'rtkFunctorInterpolationWeightMultiplicationAttenuatedBackProjectionDDD', False, 'double, double, double'),
('ProjectedValueAccumulationAttenuated', 'rtk::Functor::ProjectedValueAccumulationAttenuated', 'rtkProjectedValueAccumulationAttenuatedFF', False, 'float, float'),
('ProjectedValueAccumulationAttenuated', 'rtk::Functor::ProjectedValueAccumulationAttenuated', 'rtkProjectedValueAccumulationAttenuatedDD', False, 'double, double'),
('ComputeAttenuationCorrection', 'rtk::Functor::ComputeAttenuationCorrection', 'rtkComputeAttenuationCorrectionFF', False, 'float, float'),
('ComputeAttenuationCorrection', 'rtk::Functor::ComputeAttenuationCorrection', 'rtkComputeAttenuationCorrectionDD', False, 'double, double'),
('JosephForwardProjectionImageFilter', 'rtk::JosephForwardProjectionImageFilter', 'rtkJosephForwardProjectionImageFilterIF3IF3SWMFDFIPC', False, 'itk::Image<float, 3>, itk::Image< float, 3>, rtk::Functor::InterpolationWeightMultiplicationAttenuated<float, float, float>, rtk::Functor::ProjectedValueAccumulationAttenuated<float, float>, rtk::Functor::ComputeAttenuationCorrection<float, float>'),
('JosephForwardProjectionImageFilter', 'rtk::JosephForwardProjectionImageFilter', 'rtkJosephForwardProjectionImageFilterID3ID3SWMDDDIPC', False, 'itk::Image<double, 3>, itk::Image< double, 3>, rtk::Functor::InterpolationWeightMultiplicationAttenuated<double, double, double>, rtk::Functor::ProjectedValueAccumulationAttenuated<double, double>, rtk::Functor::ComputeAttenuationCorrection<double, double>'),
('JosephForwardAttenuatedProjectionImageFilter', 'rtk::JosephForwardAttenuatedProjectionImageFilter', 'rtkJosephForwardAttenuatedProjectionImageFilterIF3IF3', False, 'itk::Image<float, 3>, itk::Image<float, 3>'),
('JosephForwardAttenuatedProjectionImageFilter', 'rtk::JosephForwardAttenuatedProjectionImageFilter', 'rtkJosephForwardAttenuatedProjectionImageFilterID3ID3', False, 'itk::Image<double, 3>, itk::Image<double, 3>'),
('JosephForwardProjectionImageFilter', 'rtk::JosephForwardProjectionImageFilter', 'rtkJosephForwardProjectionImageFilterIF3IF3', False, 'itk::Image<float, 3>, itk::Image<float, 3>'),
('JosephForwardProjectionImageFilter', 'rtk::JosephForwardProjectionImageFilter', 'rtkJosephForwardProjectionImageFilterID3ID3', False, 'itk::Image<double, 3>, itk::Image<double, 3>'),
('JosephForwardProjectionImageFilter', 'rtk::JosephForwardProjectionImageFilter', 'rtkJosephForwardProjectionImageFilterIVF23IVF23', False, 'itk::Image<itk::Vector< float,2 >,3>, itk::Image<itk::Vector< float,2 >,3>'),
('JosephForwardProjectionImageFilter', 'rtk::JosephForwardProjectionImageFilter', 'rtkJosephForwardProjectionImageFilterIVF33IVF33', False, 'itk::Image<itk::Vector< float,3 >,3>, itk::Image<itk::Vector< float,3 >,3>'),
('JosephForwardProjectionImageFilter', 'rtk::JosephForwardProjectionImageFilter', 'rtkJosephForwardProjectionImageFilterIVF43IVF43', False, 'itk::Image<itk::Vector< float,4 >,3>, itk::Image<itk::Vector< float,4 >,3>'),
('JosephForwardProjectionImageFilter', 'rtk::JosephForwardProjectionImageFilter', 'rtkJosephForwardProjectionImageFilterIVF53IVF53', False, 'itk::Image<itk::Vector< float,5 >,3>, itk::Image<itk::Vector< float,5 >,3>'),
('LUTbasedVariableI0RawToAttenuationImageFilter', 'rtk::LUTbasedVariableI0RawToAttenuationImageFilter', 'rtkLUTbasedVariableI0RawToAttenuationImageFilterIUS2IF2', False, 'itk::Image<unsigned short, 2>, itk::Image<float, 2>'),
('LUTbasedVariableI0RawToAttenuationImageFilter', 'rtk::LUTbasedVariableI0RawToAttenuationImageFilter', 'rtkLUTbasedVariableI0RawToAttenuationImageFilterIUS3IF3', False, 'itk::Image<unsigned short, 3>, itk::Image<float, 3>'),
('LagCorrectionImageFilter', 'rtk::LagCorrectionImageFilter', 'rtkLagCorrectionImageFilterIF31', False, 'itk::Image<float, 3>, 1'),
('LagCorrectionImageFilter', 'rtk::LagCorrectionImageFilter', 'rtkLagCorrectionImageFilterIF32', False, 'itk::Image<float, 3>, 2'),
('LagCorrectionImageFilter', 'rtk::LagCorrectionImageFilter', 'rtkLagCorrectionImageFilterIF33', False, 'itk::Image<float, 3>, 3'),
('LagCorrectionImageFilter', 'rtk::LagCorrectionImageFilter', 'rtkLagCorrectionImageFilterIF34', False, 'itk::Image<float, 3>, 4'),
('LagCorrectionImageFilter', 'rtk::LagCorrectionImageFilter', 'rtkLagCorrectionImageFilterID31', False, 'itk::Image<double, 3>, 1'),
('LagCorrectionImageFilter', 'rtk::LagCorrectionImageFilter', 'rtkLagCorrectionImageFilterID32', False, 'itk::Image<double, 3>, 2'),
('LagCorrectionImageFilter', 'rtk::LagCorrectionImageFilter', 'rtkLagCorrectionImageFilterID33', False, 'itk::Image<double, 3>, 3'),
('LagCorrectionImageFilter', 'rtk::LagCorrectionImageFilter', 'rtkLagCorrectionImageFilterID34', False, 'itk::Image<double, 3>, 4'),
('LastDimensionL0GradientDenoisingImageFilter', 'rtk::LastDimensionL0GradientDenoisingImageFilter', 'rtkLastDimensionL0GradientDenoisingImageFilterIF3', False, 'itk::Image< float,3 >'),
('LastDimensionL0GradientDenoisingImageFilter', 'rtk::LastDimensionL0GradientDenoisingImageFilter', 'rtkLastDimensionL0GradientDenoisingImageFilterID3', False, 'itk::Image< double,3 >'),
('LUT', 'rtk::Functor::LUT', 'rtkFunctorLUTUIF', False, 'unsigned int, float'),
('LUT', 'rtk::Functor::LUT', 'rtkFunctorLUTUID', False, 'unsigned int, double'),
('LUT', 'rtk::Functor::LUT', 'rtkFunctorLUTUSF', False, 'unsigned short, float'),
('LUT', 'rtk::Functor::LUT', 'rtkFunctorLUTUSD', False, 'unsigned short, double'),
('LUT', 'rtk::Functor::LUT', 'rtkFunctorLUTUSUS', False, 'unsigned short, unsigned short'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterIUI2IF2LUTUIF', False, 'itk::Image<unsigned int, 2>, itk::Image<float, 2>, rtk::Functor::LUT< unsigned int, float >'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterIUI3IF3LUTUIF', False, 'itk::Image<unsigned int, 3>, itk::Image<float, 3>, rtk::Functor::LUT< unsigned int, float >'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterIUI2ID2LUTUID', False, 'itk::Image<unsigned int, 2>, itk::Image<double, 2>, rtk::Functor::LUT< unsigned int, double >'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterIUI3ID3LUTUID', False, 'itk::Image<unsigned int, 3>, itk::Image<double, 3>, rtk::Functor::LUT< unsigned int, double >'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterIUS2IF2LUTUSF', False, 'itk::Image<unsigned short, 2>, itk::Image<float, 2>, rtk::Functor::LUT< unsigned short, float >'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterIUS3IF3LUTUSF', False, 'itk::Image<unsigned short, 3>, itk::Image<float, 3>, rtk::Functor::LUT< unsigned short, float >'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterIUS2ID2LUTUSD', False, 'itk::Image<unsigned short, 2>, itk::Image<double, 2>, rtk::Functor::LUT< unsigned short, double >'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterIUS3ID3LUTUSD', False, 'itk::Image<unsigned short, 3>, itk::Image<double, 3>, rtk::Functor::LUT< unsigned short, double >'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterIUS2IUS2LUTUSUS', False, 'itk::Image<unsigned short, 2>, itk::Image<unsigned short, 2>, rtk::Functor::LUT< unsigned short, unsigned short>'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterIUS3IUS3LUTUSUS', False, 'itk::Image<unsigned short, 3>, itk::Image<unsigned short, 3>, rtk::Functor::LUT< unsigned short, unsigned short>'),
('LookupTableImageFilter', 'rtk::LookupTableImageFilter', 'rtkLookupTableImageFilterIUI2IF2', False, 'itk::Image<unsigned int, 2>, itk::Image<float, 2>'),
('LookupTableImageFilter', 'rtk::LookupTableImageFilter', 'rtkLookupTableImageFilterIUI3IF3', False, 'itk::Image<unsigned int, 3>, itk::Image<float, 3>'),
('LookupTableImageFilter', 'rtk::LookupTableImageFilter', 'rtkLookupTableImageFilterIUI2ID2', False, 'itk::Image<unsigned int, 2>, itk::Image<double, 2>'),
('LookupTableImageFilter', 'rtk::LookupTableImageFilter', 'rtkLookupTableImageFilterIUI3ID3', False, 'itk::Image<unsigned int, 3>, itk::Image<double, 3>'),
('LookupTableImageFilter', 'rtk::LookupTableImageFilter', 'rtkLookupTableImageFilterIUS2IF2', False, 'itk::Image<unsigned short, 2>, itk::Image<float, 2>'),
('LookupTableImageFilter', 'rtk::LookupTableImageFilter', 'rtkLookupTableImageFilterIUS3IF3', False, 'itk::Image<unsigned short, 3>, itk::Image<float, 3>'),
('LookupTableImageFilter', 'rtk::LookupTableImageFilter', 'rtkLookupTableImageFilterIUS2ID2', False, 'itk::Image<unsigned short, 2>, itk::Image<double, 2>'),
('LookupTableImageFilter', 'rtk::LookupTableImageFilter', 'rtkLookupTableImageFilterIUS3ID3', False, 'itk::Image<unsigned short, 3>, itk::Image<double, 3>'),
('LookupTableImageFilter', 'rtk::LookupTableImageFilter', 'rtkLookupTableImageFilterIUS2IUS2', False, 'itk::Image<unsigned short, 2>, itk::Image<unsigned short, 2>'),
('LookupTableImageFilter', 'rtk::LookupTableImageFilter', 'rtkLookupTableImageFilterIUS3IUS3', False, 'itk::Image<unsigned short, 3>, itk::Image<unsigned short, 3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF23IVF23IF3', False, 'itk::Image<itk::Vector< float,2 >,3>, itk::Image<itk::Vector< float,2 >,3>, itk::Image<float,3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF23IVF33IF3', False, 'itk::Image<itk::Vector< float,2 >,3>, itk::Image<itk::Vector< float,3 >,3>, itk::Image<float,3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF23IVF43IF3', False, 'itk::Image<itk::Vector< float,2 >,3>, itk::Image<itk::Vector< float,4 >,3>, itk::Image<float,3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF23IVF53IF3', False, 'itk::Image<itk::Vector< float,2 >,3>, itk::Image<itk::Vector< float,5 >,3>, itk::Image<float,3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF33IVF23IF3', False, 'itk::Image<itk::Vector< float,3 >,3>, itk::Image<itk::Vector< float,2 >,3>, itk::Image<float,3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF33IVF33IF3', False, 'itk::Image<itk::Vector< float,3 >,3>, itk::Image<itk::Vector< float,3 >,3>, itk::Image<float,3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF33IVF43IF3', False, 'itk::Image<itk::Vector< float,3 >,3>, itk::Image<itk::Vector< float,4 >,3>, itk::Image<float,3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF33IVF53IF3', False, 'itk::Image<itk::Vector< float,3 >,3>, itk::Image<itk::Vector< float,5 >,3>, itk::Image<float,3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF43IVF23IF3', False, 'itk::Image<itk::Vector< float,4 >,3>, itk::Image<itk::Vector< float,2 >,3>, itk::Image<float,3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF43IVF33IF3', False, 'itk::Image<itk::Vector< float,4 >,3>, itk::Image<itk::Vector< float,3 >,3>, itk::Image<float,3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF43IVF43IF3', False, 'itk::Image<itk::Vector< float,4 >,3>, itk::Image<itk::Vector< float,4 >,3>, itk::Image<float,3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF43IVF53IF3', False, 'itk::Image<itk::Vector< float,4 >,3>, itk::Image<itk::Vector< float,5 >,3>, itk::Image<float,3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF53IVF23IF3', False, 'itk::Image<itk::Vector< float,5 >,3>, itk::Image<itk::Vector< float,2 >,3>, itk::Image<float,3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF53IVF33IF3', False, 'itk::Image<itk::Vector< float,5 >,3>, itk::Image<itk::Vector< float,3 >,3>, itk::Image<float,3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF53IVF43IF3', False, 'itk::Image<itk::Vector< float,5 >,3>, itk::Image<itk::Vector< float,4 >,3>, itk::Image<float,3>'),
('MechlemOneStepSpectralReconstructionFilter', 'rtk::MechlemOneStepSpectralReconstructionFilter', 'rtkMechlemOneStepSpectralReconstructionFilterIVF53IVF53IF3', False, 'itk::Image<itk::Vector< float,5 >,3>, itk::Image<itk::Vector< float,5 >,3>, itk::Image<float,3>'),
('MotionCompensatedFourDConjugateGradientConeBeamReconstructionFilter', 'rtk::MotionCompensatedFourDConjugateGradientConeBeamReconstructionFilter', 'rtkMotionCompensatedFourDConjugateGradientConeBeamReconstructionFilterIF4IF3', False, 'itk::Image<float, 4>, itk::Image<float, 3>'),
('OSEMConeBeamReconstructionFilter', 'rtk::OSEMConeBeamReconstructionFilter', 'rtkOSEMConeBeamReconstructionFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('OSEMConeBeamReconstructionFilter', 'rtk::OSEMConeBeamReconstructionFilter', 'rtkOSEMConeBeamReconstructionFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('OraGeometryReader', 'rtk::OraGeometryReader', 'rtkOraGeometryReader', False),
('OraImageIO', 'rtk::OraImageIO', 'rtkOraImageIO', False),
('OraImageIOFactory', 'rtk::OraImageIOFactory', 'rtkOraImageIOFactory', False),
('ParkerShortScanImageFilter', 'rtk::ParkerShortScanImageFilter', 'rtkParkerShortScanImageFilterIF2', False, 'itk::Image< float,2 >'),
('ParkerShortScanImageFilter', 'rtk::ParkerShortScanImageFilter', 'rtkParkerShortScanImageFilterIF3', False, 'itk::Image< float,3 >'),
('ParkerShortScanImageFilter', 'rtk::ParkerShortScanImageFilter', 'rtkParkerShortScanImageFilterIF4', False, 'itk::Image< float,4 >'),
('ParkerShortScanImageFilter', 'rtk::ParkerShortScanImageFilter', 'rtkParkerShortScanImageFilterID2', False, 'itk::Image< double,2 >'),
('ParkerShortScanImageFilter', 'rtk::ParkerShortScanImageFilter', 'rtkParkerShortScanImageFilterID3', False, 'itk::Image< double,3 >'),
('ParkerShortScanImageFilter', 'rtk::ParkerShortScanImageFilter', 'rtkParkerShortScanImageFilterID4', False, 'itk::Image< double,4 >'),
('PhaseGatingImageFilter', 'rtk::PhaseGatingImageFilter', 'rtkPhaseGatingImageFilterIF3', False, 'itk::Image< float,3 >'),
('PhaseGatingImageFilter', 'rtk::PhaseGatingImageFilter', 'rtkPhaseGatingImageFilterID3', False, 'itk::Image< double,3 >'),
('PolynomialGainCorrectionImageFilter', 'rtk::PolynomialGainCorrectionImageFilter', 'rtkPolynomialGainCorrectionImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('PolynomialGainCorrectionImageFilter', 'rtk::PolynomialGainCorrectionImageFilter', 'rtkPolynomialGainCorrectionImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('ProjectGeometricPhantomImageFilter', 'rtk::ProjectGeometricPhantomImageFilter', 'rtkProjectGeometricPhantomImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('ProjectGeometricPhantomImageFilter', 'rtk::ProjectGeometricPhantomImageFilter', 'rtkProjectGeometricPhantomImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('ProjectionGeometry', 'rtk::ProjectionGeometry', 'rtkProjectionGeometry3', False, '3'),
('ProjectionStackToFourDImageFilter', 'rtk::ProjectionStackToFourDImageFilter', 'rtkProjectionStackToFourDImageFilterIF4IF3D', False, 'itk::Image<float, 4>, itk::Image<float, 3>, double'),
('ProjectionStackToFourDImageFilter', 'rtk::ProjectionStackToFourDImageFilter', 'rtkProjectionStackToFourDImageFilterID4ID3D', False, 'itk::Image<double, 4>, itk::Image<double, 3>, double'),
('ProjectionsReader', 'rtk::ProjectionsReader', 'rtkProjectionsReaderIUC2', False, 'itk::Image< unsigned char,2 >'),
('ProjectionsReader', 'rtk::ProjectionsReader', 'rtkProjectionsReaderIUC3', False, 'itk::Image< unsigned char,3 >'),
('ProjectionsReader', 'rtk::ProjectionsReader', 'rtkProjectionsReaderIUC4', False, 'itk::Image< unsigned char,4 >'),
('ProjectionsReader', 'rtk::ProjectionsReader', 'rtkProjectionsReaderIF2', False, 'itk::Image< float,2 >'),
('ProjectionsReader', 'rtk::ProjectionsReader', 'rtkProjectionsReaderIF3', False, 'itk::Image< float,3 >'),
('ProjectionsReader', 'rtk::ProjectionsReader', 'rtkProjectionsReaderIF4', False, 'itk::Image< float,4 >'),
('ProjectionsReader', 'rtk::ProjectionsReader', 'rtkProjectionsReaderID2', False, 'itk::Image< double,2 >'),
('ProjectionsReader', 'rtk::ProjectionsReader', 'rtkProjectionsReaderID3', False, 'itk::Image< double,3 >'),
('ProjectionsReader', 'rtk::ProjectionsReader', 'rtkProjectionsReaderID4', False, 'itk::Image< double,4 >'),
('QuadricShape', 'rtk::QuadricShape', 'rtkQuadricShape', False),
('RayBoxIntersectionImageFilter', 'rtk::RayBoxIntersectionImageFilter', 'rtkRayBoxIntersectionImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('RayBoxIntersectionImageFilter', 'rtk::RayBoxIntersectionImageFilter', 'rtkRayBoxIntersectionImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('RayConvexIntersectionImageFilter', 'rtk::RayConvexIntersectionImageFilter', 'rtkRayConvexIntersectionImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('RayConvexIntersectionImageFilter', 'rtk::RayConvexIntersectionImageFilter', 'rtkRayConvexIntersectionImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('RayEllipsoidIntersectionImageFilter', 'rtk::RayEllipsoidIntersectionImageFilter', 'rtkRayEllipsoidIntersectionImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('RayEllipsoidIntersectionImageFilter', 'rtk::RayEllipsoidIntersectionImageFilter', 'rtkRayEllipsoidIntersectionImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('RayQuadricIntersectionImageFilter', 'rtk::RayQuadricIntersectionImageFilter', 'rtkRayQuadricIntersectionImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('RayQuadricIntersectionImageFilter', 'rtk::RayQuadricIntersectionImageFilter', 'rtkRayQuadricIntersectionImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('RegularizedConjugateGradientConeBeamReconstructionFilter', 'rtk::RegularizedConjugateGradientConeBeamReconstructionFilter', 'rtkRegularizedConjugateGradientConeBeamReconstructionFilterIF3', False, 'itk::Image< float,3 >'),
('RegularizedConjugateGradientConeBeamReconstructionFilter', 'rtk::RegularizedConjugateGradientConeBeamReconstructionFilter', 'rtkRegularizedConjugateGradientConeBeamReconstructionFilterID3', False, 'itk::Image< double,3 >'),
('ReorderProjectionsImageFilter', 'rtk::ReorderProjectionsImageFilter', 'rtkReorderProjectionsImageFilterIRGBUC3IRGBUC3', False, 'itk::Image< itk::RGBPixel< unsigned char >,3 >, itk::Image< itk::RGBPixel< unsigned char >,3 >'),
('ReorderProjectionsImageFilter', 'rtk::ReorderProjectionsImageFilter', 'rtkReorderProjectionsImageFilterIRGBAUC3IRGBAUC3', False, 'itk::Image< itk::RGBAPixel< unsigned char >,3 >, itk::Image< itk::RGBAPixel< unsigned char >,3 >'),
('ReorderProjectionsImageFilter', 'rtk::ReorderProjectionsImageFilter', 'rtkReorderProjectionsImageFilterIVF23IVF23', False, 'itk::Image< itk::Vector< float,2 >,3 >, itk::Image< itk::Vector< float,2 >,3 >'),
('ReorderProjectionsImageFilter', 'rtk::ReorderProjectionsImageFilter', 'rtkReorderProjectionsImageFilterIVF33IVF33', False, 'itk::Image< itk::Vector< float,3 >,3 >, itk::Image< itk::Vector< float,3 >,3 >'),
('ReorderProjectionsImageFilter', 'rtk::ReorderProjectionsImageFilter', 'rtkReorderProjectionsImageFilterIVF43IVF43', False, 'itk::Image< itk::Vector< float,4 >,3 >, itk::Image< itk::Vector< float,4 >,3 >'),
('ReorderProjectionsImageFilter', 'rtk::ReorderProjectionsImageFilter', 'rtkReorderProjectionsImageFilterICVF23ICVF23', False, 'itk::Image< itk::CovariantVector< float,2 >,3 >, itk::Image< itk::CovariantVector< float,2 >,3 >'),
('ReorderProjectionsImageFilter', 'rtk::ReorderProjectionsImageFilter', 'rtkReorderProjectionsImageFilterICVF33ICVF33', False, 'itk::Image< itk::CovariantVector< float,3 >,3 >, itk::Image< itk::CovariantVector< float,3 >,3 >'),
('ReorderProjectionsImageFilter', 'rtk::ReorderProjectionsImageFilter', 'rtkReorderProjectionsImageFilterICVF43ICVF43', False, 'itk::Image< itk::CovariantVector< float,4 >,3 >, itk::Image< itk::CovariantVector< float,4 >,3 >'),
('ReorderProjectionsImageFilter', 'rtk::ReorderProjectionsImageFilter', 'rtkReorderProjectionsImageFilterISS3ISS3', False, 'itk::Image< signed short,3 >, itk::Image< signed short,3 >'),
('ReorderProjectionsImageFilter', 'rtk::ReorderProjectionsImageFilter', 'rtkReorderProjectionsImageFilterIUC3IUC3', False, 'itk::Image< unsigned char,3 >, itk::Image< unsigned char,3 >'),
('ReorderProjectionsImageFilter', 'rtk::ReorderProjectionsImageFilter', 'rtkReorderProjectionsImageFilterIUS3IUS3', False, 'itk::Image< unsigned short,3 >, itk::Image< unsigned short,3 >'),
('ReorderProjectionsImageFilter', 'rtk::ReorderProjectionsImageFilter', 'rtkReorderProjectionsImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('ReorderProjectionsImageFilter', 'rtk::ReorderProjectionsImageFilter', 'rtkReorderProjectionsImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('ReorderProjectionsImageFilter', 'rtk::ReorderProjectionsImageFilter', 'rtkReorderProjectionsImageFilterICD3ICD3', False, 'itk::Image< std::complex< double >,3 >, itk::Image< std::complex< double >,3 >'),
('ReorderProjectionsImageFilter', 'rtk::ReorderProjectionsImageFilter', 'rtkReorderProjectionsImageFilterICF3ICF3', False, 'itk::Image< std::complex< float >,3 >, itk::Image< std::complex< float >,3 >'),
('SARTConeBeamReconstructionFilter', 'rtk::SARTConeBeamReconstructionFilter', 'rtkSARTConeBeamReconstructionFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('SARTConeBeamReconstructionFilter', 'rtk::SARTConeBeamReconstructionFilter', 'rtkSARTConeBeamReconstructionFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('ScatterGlareCorrectionImageFilter', 'rtk::ScatterGlareCorrectionImageFilter', 'rtkScatterGlareCorrectionImageFilterIF3IF3F', False, 'itk::Image<float, 3>, itk::Image<float, 3>, float'),
('ScatterGlareCorrectionImageFilter', 'rtk::ScatterGlareCorrectionImageFilter', 'rtkScatterGlareCorrectionImageFilterID3ID3F', False, 'itk::Image<double, 3>, itk::Image<double, 3>, float'),
('SelectOneProjectionPerCycleImageFilter', 'rtk::SelectOneProjectionPerCycleImageFilter', 'rtkSelectOneProjectionPerCycleImageFilterIF3', False, 'itk::Image< float,3 >'),
('SelectOneProjectionPerCycleImageFilter', 'rtk::SelectOneProjectionPerCycleImageFilter', 'rtkSelectOneProjectionPerCycleImageFilterID3', False, 'itk::Image< double,3 >'),
('SheppLoganPhantomFilter', 'rtk::SheppLoganPhantomFilter', 'rtkSheppLoganPhantomFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('SheppLoganPhantomFilter', 'rtk::SheppLoganPhantomFilter', 'rtkSheppLoganPhantomFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('SimplexSpectralProjectionsDecompositionImageFilter', 'rtk::SimplexSpectralProjectionsDecompositionImageFilter', 'rtkSimplexSpectralProjectionsDecompositionImageFilterVIF3VIF3VIF2IF2IF2', False, 'itk::VectorImage<float, 3>, itk::VectorImage<float, 3>, itk::VectorImage<float, 2>, itk::Image<float, 2>, itk::Image<float, 2>'),
('SimplexSpectralProjectionsDecompositionImageFilter', 'rtk::SimplexSpectralProjectionsDecompositionImageFilter', 'rtkSimplexSpectralProjectionsDecompositionImageFilterVID3VID3VIF2IF2IF2', False, 'itk::VectorImage<double, 3>, itk::VectorImage<double, 3>, itk::VectorImage<float, 2>, itk::Image<float, 2>, itk::Image<float, 2>'),
('SoftThreshold', 'rtk::Functor::SoftThreshold', 'rtkFunctorSoftThresholdFF', False, 'float, float'),
('SoftThreshold', 'rtk::Functor::SoftThreshold', 'rtkFunctorSoftThresholdDD', False, 'double, double'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterIF3IF3STFF', False, 'itk::Image<float, 3>, itk::Image<float, 3>, rtk::Functor::SoftThreshold<float, float>'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterID3ID3STDD', False, 'itk::Image<double, 3>, itk::Image<double, 3>, rtk::Functor::SoftThreshold<double, double>'),
('SoftThresholdImageFilter', 'rtk::SoftThresholdImageFilter', 'rtkSoftThresholdImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('SoftThresholdImageFilter', 'rtk::SoftThresholdImageFilter', 'rtkSoftThresholdImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('SpectralForwardModelImageFilter', 'rtk::SpectralForwardModelImageFilter', 'rtkSpectralForwardModelImageFilterVIF3VIF3VIF2IF2IF2', False, 'itk::VectorImage<float, 3>, itk::VectorImage<float, 3>, itk::VectorImage<float, 2>, itk::Image<float, 2>, itk::Image<float, 2>'),
('SpectralForwardModelImageFilter', 'rtk::SpectralForwardModelImageFilter', 'rtkSpectralForwardModelImageFilterVID3VID3VIF2IF2IF2', False, 'itk::VectorImage<double, 3>, itk::VectorImage<double, 3>, itk::VectorImage<float, 2>, itk::Image<float, 2>, itk::Image<float, 2>'),
('SubSelectFromListImageFilter', 'rtk::SubSelectFromListImageFilter', 'rtkSubSelectFromListImageFilterIF3', False, 'itk::Image< float,3 >'),
('SubSelectFromListImageFilter', 'rtk::SubSelectFromListImageFilter', 'rtkSubSelectFromListImageFilterID3', False, 'itk::Image< double,3 >'),
('SubSelectImageFilter', 'rtk::SubSelectImageFilter', 'rtkSubSelectImageFilterIF3', False, 'itk::Image< float,3 >'),
('SubSelectImageFilter', 'rtk::SubSelectImageFilter', 'rtkSubSelectImageFilterID3', False, 'itk::Image< double,3 >'),
('ThreeDCircularProjectionGeometry', 'rtk::ThreeDCircularProjectionGeometry', 'rtkThreeDCircularProjectionGeometry', False),
('XMLReader', 'itk::XMLReader', 'itkXMLReader3DCPG', False, 'rtk::ThreeDCircularProjectionGeometry'),
('XMLReaderBase', 'itk::XMLReaderBase', 'itkXMLFile', False),
('ThreeDCircularProjectionGeometryXMLFileReader', 'rtk::ThreeDCircularProjectionGeometryXMLFileReader', 'rtkThreeDCircularProjectionGeometryXMLFileReader', False),
('XMLWriterBase', 'itk::XMLWriterBase', 'itkXMLWriterBase3DCPG', False, 'rtk::ThreeDCircularProjectionGeometry'),
('ThreeDCircularProjectionGeometryXMLFileWriter', 'rtk::ThreeDCircularProjectionGeometryXMLFileWriter', 'rtkThreeDCircularProjectionGeometryXMLFileWriter', False),
('TotalVariationDenoisingBPDQImageFilter', 'rtk::TotalVariationDenoisingBPDQImageFilter', 'rtkTotalVariationDenoisingBPDQImageFilterIF2ICVF22', False, 'itk::Image<float, 2>, itk::Image<itk::CovariantVector< float,2 >, 2>'),
('TotalVariationDenoisingBPDQImageFilter', 'rtk::TotalVariationDenoisingBPDQImageFilter', 'rtkTotalVariationDenoisingBPDQImageFilterIF3ICVF33', False, 'itk::Image<float, 3>, itk::Image<itk::CovariantVector< float,3 >, 3>'),
('TotalVariationDenoisingBPDQImageFilter', 'rtk::TotalVariationDenoisingBPDQImageFilter', 'rtkTotalVariationDenoisingBPDQImageFilterIF4ICVF44', False, 'itk::Image<float, 4>, itk::Image<itk::CovariantVector< float,4 >, 4>'),
('TotalVariationImageFilter', 'rtk::TotalVariationImageFilter', 'rtkTotalVariationImageFilterIF2', False, 'itk::Image< float,2 >'),
('TotalVariationImageFilter', 'rtk::TotalVariationImageFilter', 'rtkTotalVariationImageFilterIF3', False, 'itk::Image< float,3 >'),
('TotalVariationImageFilter', 'rtk::TotalVariationImageFilter', 'rtkTotalVariationImageFilterIF4', False, 'itk::Image< float,4 >'),
('TotalVariationImageFilter', 'rtk::TotalVariationImageFilter', 'rtkTotalVariationImageFilterID2', False, 'itk::Image< double,2 >'),
('TotalVariationImageFilter', 'rtk::TotalVariationImageFilter', 'rtkTotalVariationImageFilterID3', False, 'itk::Image< double,3 >'),
('TotalVariationImageFilter', 'rtk::TotalVariationImageFilter', 'rtkTotalVariationImageFilterID4', False, 'itk::Image< double,4 >'),
('VarianObiGeometryReader', 'rtk::VarianObiGeometryReader', 'rtkVarianObiGeometryReader', False),
('ObiAttenuation', 'rtk::Function::ObiAttenuation', 'rtkFunctionObiAttenuationUIF', False, 'unsigned int, float'),
('ObiAttenuation', 'rtk::Function::ObiAttenuation', 'rtkFunctionObiAttenuationUID', False, 'unsigned int, double'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterIUI2IF2OAUIF', False, 'itk::Image<unsigned int, 2>, itk::Image<float, 2>, rtk::Function::ObiAttenuation< unsigned int,float >'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterIUI2ID2OAUID', False, 'itk::Image<unsigned int, 2>, itk::Image<double, 2>, rtk::Function::ObiAttenuation< unsigned int,double >'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterIUI3IF3OAUIF', False, 'itk::Image<unsigned int, 3>, itk::Image<float, 3>, rtk::Function::ObiAttenuation< unsigned int,float >'),
('UnaryFunctorImageFilter', 'itk::UnaryFunctorImageFilter', 'itkUnaryFunctorImageFilterIUI3ID3OAUID', False, 'itk::Image<unsigned int, 3>, itk::Image<double, 3>, rtk::Function::ObiAttenuation< unsigned int,double >'),
('VarianObiRawImageFilter', 'rtk::VarianObiRawImageFilter', 'rtkVarianObiRawImageFilterIUI2IF2', False, 'itk::Image<unsigned int, 2>, itk::Image<float, 2>'),
('VarianObiRawImageFilter', 'rtk::VarianObiRawImageFilter', 'rtkVarianObiRawImageFilterIUI2ID2', False, 'itk::Image<unsigned int, 2>, itk::Image<double, 2>'),
('VarianObiRawImageFilter', 'rtk::VarianObiRawImageFilter', 'rtkVarianObiRawImageFilterIUI3IF3', False, 'itk::Image<unsigned int, 3>, itk::Image<float, 3>'),
('VarianObiRawImageFilter', 'rtk::VarianObiRawImageFilter', 'rtkVarianObiRawImageFilterIUI3ID3', False, 'itk::Image<unsigned int, 3>, itk::Image<double, 3>'),
('VarianProBeamGeometryReader', 'rtk::VarianProBeamGeometryReader', 'rtkVarianProBeamGeometryReader', False),
('VectorImageToImageFilter', 'rtk::VectorImageToImageFilter', 'rtkVectorImageToImageFilterVIF2IF2', False, 'itk::VectorImage<float, 2>, itk::Image<float, 2>'),
('VectorImageToImageFilter', 'rtk::VectorImageToImageFilter', 'rtkVectorImageToImageFilterVIF2IF3', False, 'itk::VectorImage<float, 2>, itk::Image<float, 3>'),
('VectorImageToImageFilter', 'rtk::VectorImageToImageFilter', 'rtkVectorImageToImageFilterVIF3IF3', False, 'itk::VectorImage<float, 3>, itk::Image<float, 3>'),
('VectorImageToImageFilter', 'rtk::VectorImageToImageFilter', 'rtkVectorImageToImageFilterVIF3IF4', False, 'itk::VectorImage<float, 3>, itk::Image<float, 4>'),
('VectorImageToImageFilter', 'rtk::VectorImageToImageFilter', 'rtkVectorImageToImageFilterVID2ID2', False, 'itk::VectorImage<double, 2>, itk::Image<double, 2>'),
('VectorImageToImageFilter', 'rtk::VectorImageToImageFilter', 'rtkVectorImageToImageFilterVID2ID3', False, 'itk::VectorImage<double, 2>, itk::Image<double, 3>'),
('VectorImageToImageFilter', 'rtk::VectorImageToImageFilter', 'rtkVectorImageToImageFilterVID3ID3', False, 'itk::VectorImage<double, 3>, itk::Image<double, 3>'),
('VectorImageToImageFilter', 'rtk::VectorImageToImageFilter', 'rtkVectorImageToImageFilterVID3ID4', False, 'itk::VectorImage<double, 3>, itk::Image<double, 4>'),
('WaterPrecorrectionImageFilter', 'rtk::WaterPrecorrectionImageFilter', 'rtkWaterPrecorrectionImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('WaterPrecorrectionImageFilter', 'rtk::WaterPrecorrectionImageFilter', 'rtkWaterPrecorrectionImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('XRadGeometryReader', 'rtk::XRadGeometryReader', 'rtkXRadGeometryReader', False),
('XRadImageIO', 'rtk::XRadImageIO', 'rtkXRadImageIO', False),
('XRadImageIOFactory', 'rtk::XRadImageIOFactory', 'rtkXRadImageIOFactory', False),
('XRadRawToAttenuationImageFilter', 'rtk::XRadRawToAttenuationImageFilter', 'rtkXRadRawToAttenuationImageFilterIF3IF3', False, 'itk::Image< float,3 >, itk::Image< float,3 >'),
('XRadRawToAttenuationImageFilter', 'rtk::XRadRawToAttenuationImageFilter', 'rtkXRadRawToAttenuationImageFilterID3ID3', False, 'itk::Image< double,3 >, itk::Image< double,3 >'),
('XimImageIO', 'rtk::XimImageIO', 'rtkXimImageIO', False),
('XimImageIOFactory', 'rtk::XimImageIOFactory', 'rtkXimImageIOFactory', False),
('ZengBackProjectionImageFilter', 'rtk::ZengBackProjectionImageFilter', 'rtkZengBackProjectionImageFilterIF3IF3SWMFDF', False, 'itk::Image<float, 3>, itk::Image< float, 3>'),
('ZengBackProjectionImageFilter', 'rtk::ZengBackProjectionImageFilter', 'rtkZengBackProjectionImageFilterID3ID3SWMDDD', False, 'itk::Image<double, 3>, itk::Image< double, 3>'),
('ZengForwardProjectionImageFilter', 'rtk::ZengForwardProjectionImageFilter', 'rtkZengForwardProjectionImageFilterIF3IF3', False, 'itk::Image<float, 3>, itk::Image<float, 3>'),
('ZengForwardProjectionImageFilter', 'rtk::ZengForwardProjectionImageFilter', 'rtkZengForwardProjectionImageFilterID3ID3', False, 'itk::Image<double, 3>, itk::Image<double, 3>'),
)
factories = (("ImageIO","DCMImagX"),("ImageIO","His"),("ImageIO","Hnc"),("ImageIO","Hnd"),("ImageIO","ImagX"),("ImageIO","Ora"),("ImageIO","XRad"),("ImageIO","Xim"),)
/HISpectralModel-0.1.0.tar.gz/HISpectralModel-0.1.0/hispectrum/hiutils/fitsWcs.py
# Name: fitsWcs
#
# Author: Ian Stewart
#
# TODO:
#
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# Copyright (C) 2014 Ian M Stewart
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For the GNU General Public License, see <http://www.gnu.org/licenses/>.
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# History (date, change author):
#
# 2014-05-14 IMS/AIfA
#.......................................................................
# * Copied to this release version.
# - Deleted test harness.
#
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""
Defines some classes to store World Coordinate System (WCS) information. The WCS concept was developed with FITS files in view, but it has wider applicability: it suits any situation in which one has an array of numbers of any dimensionality (the elements of the array are denoted in the WCS formalism as 'pixels') and wishes to encode a mapping from pixel number to some other coordinate system (known as the 'world coordinates').
The fundamental references for WCS are:
I: "Representations of world coordinates in FITS", Greisen, E.W. & Calabretta, M.R. (2002), Astronomy & Astrophysics, 395, 1061-1075.
Also relevant is a supplement to Paper I, "Concatenation of FITS World Coordinate Systems", by Steve Allen and Doug Mink.
II: "Representations of celestial coordinates in FITS", Calabretta, M.R. & Greisen, E.W. (2002), Astronomy & Astrophysics, 395, 1077-1122.
III: "Representations of spectral coordinates in FITS", Greisen, E.W., Calabretta, M.R., Valdes, F.G. & Allen, S.L. (2006), Astronomy & Astrophysics, 446, 747-771.
A quick glance at these will show that WCS can get a lot more complicated than I have presented here. But since I have never had occasion to use anything much more complicated than a simple linear mapping, and since this software is intended primarily for my own purposes, I have not attempted to write a really comprehensive interface. In any case there are existing packages which cover all the bases.
"""
_module_name = 'fitsWcs'
import numpy as nu
import local_exceptions as ex
#.......................................................................
# Exception classes:
class UnsupportedExtensionType(Exception):
def __init__(self, extensionType):
self.extensionType = extensionType
def __str__(self):
return 'Extension type %s is not supported.' % (self.extensionType)
class MismatchedExtensionType(Exception):
def __init__(self, fileExtensionType, instanceExtensionType):
self.fileExtensionType = fileExtensionType
self.instanceExtensionType = instanceExtensionType
def __str__(self):
return 'Your WCS has extension type %s but the file you are writing to has type %s.'\
% (self.fileExtensionType, self.instanceExtensionType)
class ComboSurprise(Exception):
def __init__(self, axisI, comboI):
self.axisI = axisI
self.comboI = comboI
def __str__(self):
return 'In new combo, found axis %d which already has combo number %d.' % (self.axisI, self.comboI)
class MismatchedCombos(Exception):
def __init__(self, comboI, comboJ):
self.comboI = comboI
self.comboJ = comboJ
def __str__(self):
return 'Expected combo %d but found %d.' % (self.comboI, self.comboJ)
class UnwantedCombo(Exception):
def __init__(self, i, j, comboI):
self.i = i
self.j = j
self.comboI = comboI
def __str__(self):
return 'No CD kwd found at axis pair (%d,%d) but axis %d was found labelled with combo number %d.' % (self.i, self.j, self.j, self.comboI)
class AxisIsInJoint(Exception):
def __init__(self, axisNum):
self.axisNum = axisNum
def __str__(self):
return 'Axis %d is part of a joint-transform group of axes and can not be returned via this method.' % (self.axisNum)
class AxisIsNotInJoint(Exception):
def __init__(self, axisNum):
self.axisNum = axisNum
def __str__(self):
return 'Axis %d is NOT part of a joint-transform group of axes and can not be returned via this method.' % (self.axisNum)
class AxisNotFound(Exception):
def __init__(self, axisNum):
self.axisNum = axisNum
def __str__(self):
return 'Axis %d was not found in the WCS.' % (self.axisNum)
#.......................................................................
class _WCS:
_validExtensionTypes = ['IMAGE','BINTABLE',None]
_ctypeKwdRoots = {'IMAGE':'CTYPE', 'BINTABLE':'TCTYP'}
_cdKwdRoots = {'IMAGE':'CD', 'BINTABLE':'TCD'}
_kwdRoots = {'IMAGE':[ 'CRPIX','CRVAL','CUNIT','CDELT']\
, 'BINTABLE':['TCRPX','TCRVL','TCUNI','TCDLT']}
def __init__(self, extensionType):
self.extensionType = extensionType
#.......................................................................
class WCSAxis(_WCS):
"""
The primary superclass which contains most of the necessary information to define a WCS axis.
"""
_attrNames = ['refInPixels', 'refInWorld', 'worldUnit', 'pixelDeltaWorld'] # order needs to match _WCS._kwdRoots.
def __init__(self, ctype, refInPixels, refInWorld, worldUnit='', axisNumber=None, extensionType=None):
"""Note that the WCS convention is that the centre of the first pixel is located at 1.0 on the axis of pixel values. Thus when calling WCS routines from languages (almost all of them except Fortran!) in which the first element of any array has index zero, one must add 1 to the pixel index to get the WCS pixel coordinate value."""
_WCS.__init__(self, extensionType)
self.ctype = ctype
self.refInPixels = refInPixels
self.refInWorld = refInWorld
self.worldUnit = worldUnit
self.axisNumber = axisNumber # image axis number for arrays, but column number for binary tables. If it is 'None' this is a flag to the addAxis method of FitsHeaderWCS, or the constructor of WcsAxisCombo, to assign the first free axis number.
def compileListOfKwds(self):#, extensionType):
if self.axisNumber==None:
raise ex.NotYetImplemented()
listOfKwds = []
ctypeKwdRoot = self._ctypeKwdRoots[self.extensionType] # inherits from
kwdRoots = self._kwdRoots[ self.extensionType] # _WCS.
# Write CTYPE and 'ordinary' keywords:
#
aI = self.axisNumber
ctypeKwd = '%s%d' % (ctypeKwdRoot, aI) # axisNumber starts at 1.
listOfKwds.append({'name':ctypeKwd,'value':self.ctype})
for k in range(len(kwdRoots)-1): # '-1' because we don't want to assume CDELT at this stage.
kwdName = '%s%d' % (kwdRoots[k], aI)
attrName = self._attrNames[k]
listOfKwds.append({'name':kwdName,'value':self.__dict__[attrName]})
return listOfKwds
def _getStrCore(self, spaces=''):
returnedStr = spaces+' Extension type: %s\n' % (self.extensionType)
if self.extensionType==None:
returnedStr += spaces+' Axis number = %d\n' % (self.axisNumber)
returnedStr += spaces+' Mapping type = %s\n' % (self.ctype)
returnedStr += spaces+'Reference pixel = %f\n' % (self.refInPixels)
returnedStr += spaces+'Reference world = %f\n' % (self.refInWorld)
returnedStr += spaces+' World unit = %s\n' % (self.worldUnit)
else:
returnedStr += spaces+'Axis number = %d\n' % (self.axisNumber)
listOfKwds = self.compileListOfKwds()
for kwd in listOfKwds:
returnedStr += spaces+' %s = %s\n' % (kwd['name'], kwd['value'])
return returnedStr
def __str__(self, spaces=''):
returnedStr = spaces+'<WCSAxis object.\n'
returnedStr += self._getStrCore(spaces+' ')
return returnedStr + spaces+'>'
def copy(self):
newAxisWcs = WCSAxis(self.ctype, self.refInPixels, self.refInWorld\
, self.worldUnit, self.axisNumber, self.extensionType)
return newAxisWcs
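# A minimal usage sketch (assumed example values; not part of the original
# module): a WCSAxis attached to image axis 1 of an 'IMAGE' extension yields
# the familiar CTYPE1/CRPIX1/CRVAL1/CUNIT1 header keywords. Note the FITS
# convention that the centre of the first pixel sits at pixel coordinate 1.0,
# so a zero-based array index i corresponds to WCS pixel coordinate i + 1.
def _exampleWcsAxisKwds():
  velAxis = WCSAxis('VELO-LSR', refInPixels=1.0, refInWorld=0.0\
    , worldUnit='m/s', axisNumber=1, extensionType='IMAGE')
  return velAxis.compileListOfKwds() # e.g. [{'name':'CTYPE1','value':'VELO-LSR'}, ...]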
#.......................................................................
class _WCSMapping:
"""
A superclass which contains the methods to convert from pixel to world coordinates and back again.
"""
def __init__(self, pixToWorldMatrix):
self._pixToWorldMatrix = pixToWorldMatrix
def _pixelToWorldDeltas(self, pixelDeltaValues):
numAxes = self._pixToWorldMatrix.shape[0]
worldDeltaValues = []
for row in range(numAxes):
worldDeltaValues.append((self._pixToWorldMatrix[row,:] * pixelDeltaValues).sum())
return nu.array(worldDeltaValues)
def _worldToPixelDeltas(self, worldDeltaValues):
numAxes = self._pixToWorldMatrix.shape[0]
invMatrix = nu.linalg.inv(self._pixToWorldMatrix)
pixelDeltaValues = []
for row in range(numAxes):
pixelDeltaValues.append((invMatrix[row,:] * worldDeltaValues).sum())
return nu.array(pixelDeltaValues)
def pixelToWorld(self, pixelValues):
raise ex.EmptyMethod()
def worldToPixel(self, worldValues):
raise ex.EmptyMethod()
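# Illustrative note (not part of the original module): the two helper methods
# above implement the usual WCS matrix relation between coordinate offsets,
#
#   worldDelta[i] = sum_j M[i,j] * pixelDelta[j]
#   pixelDelta[j] = sum_i inv(M)[j,i] * worldDelta[i]
#
# where M is the pixel-to-world matrix supplied to the constructor.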
#.......................................................................
class WCSAxisSimple(WCSAxis, _WCSMapping):
"""
This is intended to be the shop-front for any WCS axis for which the mapping between pixel and world coordinates depends only on that axis - i.e. there is no rotation implicit in that particular coordinate transform.
"""
def __init__(self, ctype, refInPixels, refInWorld, pixelDeltaWorld\
, worldUnit='', axisNumber=None, extensionType=None):
WCSAxis.__init__(self, ctype, refInPixels, refInWorld, worldUnit, axisNumber, extensionType)
_WCSMapping.__init__(self, nu.array([[pixelDeltaWorld]]))
self._pixelDeltaWorld = pixelDeltaWorld
def pixelToWorld(self, pixelValue):
worldDeltaValue = float(self._pixelToWorldDeltas(pixelValue - self.refInPixels)[0]) # converts from numpy array scalar to honest-to-god python scalar.
return worldDeltaValue + self.refInWorld
def worldToPixel(self, worldValue):
pixelDeltaValue = float(self._worldToPixelDeltas(worldValue - self.refInWorld)[0]) # converts from numpy array scalar to honest-to-god python scalar.
return pixelDeltaValue + self.refInPixels
def pixelsToWorld(self, pixelValues):
worldDeltaValues = (pixelValues - self.refInPixels)*self._pixelDeltaWorld
return worldDeltaValues + self.refInWorld
def worldsToPixel(self, worldValues):
pixelDeltaValues = (worldValues - self.refInWorld)/self._pixelDeltaWorld
return pixelDeltaValues + self.refInPixels
def _getCdeltKwd(self):
kwdRoots = self._kwdRoots[self.extensionType]
kwdName = '%s%d' % (kwdRoots[-1], self.axisNumber)
return {'name':kwdName,'value':self._pixelDeltaWorld}
def compileListOfKwds(self):
listOfKwds = WCSAxis.compileListOfKwds(self)
cdeltKwd = self._getCdeltKwd()
listOfKwds.append(cdeltKwd)
return listOfKwds
def _getStrCore(self, spaces=''):
returnedStr = WCSAxis._getStrCore(self, spaces)
if self.extensionType==None:
returnedStr += spaces+' pixel size = %f (note: can be -ve)\n' % (self._pixelDeltaWorld)
return returnedStr
def __str__(self, spaces=''):
returnedStr = spaces+'<WCSAxisSimple instance.\n'
returnedStr += self._getStrCore(spaces+' ')
return returnedStr + spaces+'>'
def copy(self):
newAxisWcs = WCSAxisSimple(self.ctype, self.refInPixels, self.refInWorld\
, self._pixelDeltaWorld, self.worldUnit, self.axisNumber, self.extensionType)
return newAxisWcs
def writeToHeader(self, hdu, adjustExtType=True):
extensionType = readExtType(hdu)
if extensionType!=self.extensionType:
if adjustExtType:
self.extensionType = extensionType
else:
raise MismatchedExtensionType(extensionType, self.extensionType)
listOfKwds = self.compileListOfKwds()
for kwd in listOfKwds:
if str(kwd['value'])=='':
continue
hdu.header.update(kwd['name'], kwd['value']) # Strictly speaking this is a side effect, tsk. Better to return the hdu??
##### also write comments??
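# A minimal usage sketch (assumed example values; not part of the original
# module): a 1-D linear frequency axis, converting a pixel coordinate to a
# world coordinate and back again.
def _exampleSimpleAxisUsage():
  freqAxis = WCSAxisSimple('FREQ', refInPixels=1.0, refInWorld=1.42e9\
    , pixelDeltaWorld=1.0e4, worldUnit='Hz', axisNumber=1)
  worldValue = freqAxis.pixelToWorld(101.0) # 1.42e9 + (101.0 - 1.0)*1.0e4 = 1.421e9 Hz
  pixelValue = freqAxis.worldToPixel(worldValue) # recovers 101.0
  return (worldValue, pixelValue)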
#.......................................................................
class WCSAxesJoint(_WCS, _WCSMapping):
"""
This is intended to be the way to describe several WCS axes for which the mapping between pixel and world coordinates involves all of the axes and can only be described by a matrix with non-zero off-diagonal terms.
"""
def __init__(self, wcsAxisList, pixToWorldMatrix, extensionType=None):
"""
Specification of the arguments:
- wcsAxisList should be a simple list of WCSAxis objects.
- pixToWorldMatrix should be a square 2D numpy array of floats. This should have the same number of rows as the number of elements in wcsAxisList.
"""
_WCS.__init__(self, extensionType)
_WCSMapping.__init__(self, pixToWorldMatrix)
self._wcsAxisList = wcsAxisList
numAxesInCombo = len(self._wcsAxisList)
self._refsInPixels = nu.zeros([numAxesInCombo], float)
self._refsInWorld = nu.zeros([numAxesInCombo], float)
for i in range(numAxesInCombo):
self._refsInPixels[i] = self._wcsAxisList[i].refInPixels
self._refsInWorld[ i] = self._wcsAxisList[i].refInWorld
self.setExtTypeOfAxes()
def setExtTypeOfAxes(self):
for wcsAxis in self._wcsAxisList:
wcsAxis.extensionType = self.extensionType
def pixelToWorld(self, pixelValues):
worldDeltaValues = self._pixelToWorldDeltas(pixelValues - self._refsInPixels)
return worldDeltaValues + self._refsInWorld
def worldToPixel(self, worldValues):
pixelDeltaValues = self._worldToPixelDeltas(worldValues - self._refsInWorld)
return pixelDeltaValues + self._refsInPixels
def _getCDKwds(self):
cdKwdRoot = self._cdKwdRoots[self.extensionType]
numAxesInCombo = len(self._wcsAxisList)
listOfKwds = []
for row in range(numAxesInCombo):
rowAxis = self._wcsAxisList[row].axisNumber
for col in range(numAxesInCombo):
colAxis = self._wcsAxisList[col].axisNumber
kwdName = '%s%d%s%d' % (cdKwdRoot, rowAxis, '_', colAxis)
listOfKwds.append({'name':kwdName,'value':self._pixToWorldMatrix[row,col]})
return listOfKwds
def compileListOfKwds(self):
numAxesInCombo = len(self._wcsAxisList)
listOfKwds = []
for i in range(numAxesInCombo):
listOfKwds += self._wcsAxisList[i].compileListOfKwds()
return listOfKwds+self._getCDKwds()
def __str__(self, spaces=''):
numAxesInCombo = len(self._wcsAxisList)
returnedStr = spaces+'<WCSAxesJoint object.\n'
for axis in self._wcsAxisList:
returnedStr += axis.__str__(spaces+' ')+'\n'
returnedStr += spaces+'Elements of the transformation matrix:\n'
if self.extensionType==None:
returnedStr += spaces+' row (axis) col (axis) value:\n'
for row in range(numAxesInCombo):
rowAxis = self._wcsAxisList[row].axisNumber
for col in range(numAxesInCombo):
colAxis = self._wcsAxisList[col].axisNumber
returnedStr += spaces+' %2d %2d %2d %2d %f\n'\
% (row, rowAxis, col, colAxis, self._pixToWorldMatrix[row,col])
else:
listCDKwds = self._getCDKwds()
for kwd in listCDKwds:
returnedStr += spaces+' %s = %s\n' % (kwd['name'], kwd['value'])
return returnedStr + spaces+'>'
def copy(self):
newAxisCombo = WCSAxesJoint(self._wcsAxisList[:], self._pixToWorldMatrix.copy()\
, self.extensionType)
return newAxisCombo
#.......................................................................
def readExtType(hdu):
# Get extensionType:
#
try:
extensionType = hdu.header['XTENSION']
except KeyError:
if hdu.header['SIMPLE']:
extensionType = 'IMAGE' # i.e. this is the primary HDU.
return extensionType
#.......................................................................
def readWCSFromFITS(hdu):
extensionType = readExtType(hdu)
# Get the number of axes:
#
if extensionType=='IMAGE':
numAxes = hdu.header['NAXIS']
elif extensionType=='BINTABLE':
numAxes = hdu.header['TFIELDS'] # actually the number of table columns.
else:
raise UnsupportedExtensionType(extensionType)
ctypeKwdRoot = _WCS._ctypeKwdRoots[extensionType]
cdKwdRoot = _WCS._cdKwdRoots[ extensionType]
kwdRoots = _WCS._kwdRoots[ extensionType]
# Find all CD keywords and construct a matrix of their values:
#
cdKwdFound = nu.zeros([numAxes,numAxes], bool)
cdKwdValues = nu.zeros([numAxes,numAxes], float)
for i in range(numAxes):
for j in range(numAxes):
kwdName = '%s%d_%d' % (cdKwdRoot, i+1, j+1)
try:
kwdValA = hdu.header[kwdName]
except KeyError:
continue
cdKwdFound[i,j] = True
cdKwdValues[i,j] = kwdValA
if i==j: continue
# If we got this far, we must have found the keyword. The symmetric one should also be present.
#
kwdName = '%s%d_%d' % (cdKwdRoot, j+1, i+1)
kwdValB = hdu.header[kwdName]
cdKwdFound[j,i] = True
cdKwdValues[j,i] = kwdValB
# Now I want to do some checks and book-keeping to make sure that each axis takes part in no more than 1 combo, and that for each member of each combo, CD kwds exist for all members of the combo (including itself):
#
listOfCombos = []
comboOfAxis = nu.zeros([numAxes], int) # we define that valid combo numbers start from 1.
for i in range(numAxes):
if not cdKwdFound[i,:].any(): # means this axis is not part of a combo.
continue
if comboOfAxis[i]==0: # this means we have not previously looked at any members of the combo of which this axis is part. We should load all the members of this combo.
# Make a list of all axis numbers in the combo:
#
comboAxisIs = []
for j in range(numAxes):
if cdKwdFound[i,j]:
if comboOfAxis[j]>0: raise ComboSurprise(j, comboOfAxis[j])
comboAxisIs.append(j)
# Append this object to the list of combos; then use the new length of this list as the 'combo number' to assign all axes of the combo:
#
listOfCombos.append(comboAxisIs)
numOfCurrentCombo = len(listOfCombos)
for i in comboAxisIs:
comboOfAxis[i] = numOfCurrentCombo
else: # we have previously treated this combo, starting from a different axis. Check that the correct CD values are present:
numOfPreviousCombo = comboOfAxis[i]
for j in range(numAxes):
if cdKwdFound[i,j] and comboOfAxis[j]!=numOfPreviousCombo:
raise MismatchedCombos(numOfPreviousCombo, comboOfAxis[j])
if not cdKwdFound[i,j] and comboOfAxis[j]==numOfPreviousCombo:
raise UnwantedCombo(i, j, j, numOfPreviousCombo)
# Extract and store the non-mapping keyword values for all axes present:
#
tempListOfWcsAxes = []
for i in range(numAxes):
aI = i+1
ctypeKwd = '%s%d' % (ctypeKwdRoot, aI)
try:
ctype = hdu.header[ctypeKwd]
except KeyError:
tempListOfWcsAxes.append(None)
continue # no CTYPEi/TCTYPi, so assume no WCS kwds this axis.
# Read the other non-mapping keywords, i.e. 'CRPIX', 'CRVAL' and 'CUNIT'.
#
kwdName = '%s%d' % (kwdRoots[0], aI) # 'CRPIX'/
refInPixels = hdu.header[kwdName]
kwdName = '%s%d' % (kwdRoots[1], aI) # 'CRVAL'/
refInWorld = hdu.header[kwdName]
kwdName = '%s%d' % (kwdRoots[2], aI) # 'CUNIT'/
try:
worldUnit = hdu.header[kwdName]
except KeyError:
worldUnit = ''
if comboOfAxis[i]!=0: # this axis is part of a joint-transform combination. Just load a non-committal axis:
tempListOfWcsAxes.append(WCSAxis(ctype, refInPixels, refInWorld\
, worldUnit, aI, extensionType))
else: # look for CDELT:
kwdName = '%s%d' % (kwdRoots[3], aI) # 'CDELT'/
pixelDeltaWorld = hdu.header[kwdName]
tempListOfWcsAxes.append(WCSAxisSimple(ctype, refInPixels, refInWorld\
, pixelDeltaWorld, worldUnit, aI, extensionType))
# Now fill the list of elements. I'll do this in 2 passes, the 1st to push all the simple axes, the 2nd to push all the joint axes.
#
listOfElements = []
for i in range(numAxes):
if tempListOfWcsAxes[i]==None or comboOfAxis[i]!=0: # the first means there are no WCS kwds at all for axis i+1, the second means the axis is part of a joint-transform combination.
continue
listOfElements.append(tempListOfWcsAxes[i])
# Now the 2nd pass to extract and push all the joint-transform combinations.
#
for comboAxisIs in listOfCombos:
wcsAxisList = []
numAxesThisCombo = len(comboAxisIs)
pixToWorldMatrix = nu.zeros([numAxesThisCombo,numAxesThisCombo], float)
for ii in range(numAxesThisCombo):
i = comboAxisIs[ii]
wcsAxisList.append(tempListOfWcsAxes[i])
for jj in range(numAxesThisCombo):
j = comboAxisIs[jj]
pixToWorldMatrix[ii,jj] = cdKwdValues[i,j]
# Now construct the combo and add it to the list:
#
axisCombo = WCSAxesJoint(wcsAxisList, pixToWorldMatrix, extensionType)
listOfElements.append(axisCombo)
totalWCS = FitsHeaderWCS(extensionType, listOfElements)
return totalWCS
#. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
class FitsHeaderWCS(_WCS):
_maxAllowedAxisNum = 99
def __init__(self, extensionType=None, listOfElements=[]):
"""
The elements in listOfElements are expected to be objects of type either WCSAxisSimple or WCSAxesJoint.
"""
if not extensionType in self._validExtensionTypes:
raise UnsupportedExtensionType(extensionType)
self.extensionType = extensionType
# For convenience, convert all the WCSAxisSimple elements into single-component WCSAxesJoint objects:
#
self._listOfCombos = []
for element in listOfElements:
if element.__class__==WCSAxisSimple:
axis = WCSAxis(element.ctype, element.refInPixels\
, element.refInWorld, element.worldUnit, element.axisNumber\
, self.extensionType)
pixToWorldMatrix = nu.array([[element._pixelDeltaWorld]])
self._listOfCombos.append(WCSAxesJoint([axis], pixToWorldMatrix, self.extensionType))
elif element.__class__==WCSAxesJoint:
element.extensionType = self.extensionType
element.setExtTypeOfAxes()
self._listOfCombos.append(element)
else:
raise ex.UnrecognizedChoiceObject(element.__class__.__name__)
# Note that we set all extensionType values of the components to be the same as that of the totalWCS.
def hasAxis(self, axisNum):
for wcsCombo in self._listOfCombos:
numAxes = len(wcsCombo._wcsAxisList)
for j in range(numAxes):
if wcsCombo._wcsAxisList[j].axisNumber==axisNum:
return True
else: # didn't find axis at all.
return False
def getAxis(self, axisNum):
"""Returns a WCSAxisSimple object provided that (i) that axis number exists in the FitsHeaderWCS; (ii) it is not part of a joint-transform combination with another axis."""
for wcsCombo in self._listOfCombos:
numAxes = len(wcsCombo._wcsAxisList)
for j in range(numAxes):
wcsAxisSpecs = wcsCombo._wcsAxisList[j]
if wcsAxisSpecs.axisNumber==axisNum:
if numAxes<=1:
pixelDeltaWorld = float(wcsCombo._pixToWorldMatrix[0,0])
break
else:
raise AxisIsInJoint(axisNum)
# end if wcsAxisSpecs.axisNumber==axisNum
else: # didn't find the axis in this combo.
continue # do next combo.
# end inner loop over axes this combo.
# If got to here, means the axis was found, and it was non-joint: so we broke out of the inner loop. So break out of the outer one too - nothing more to do.
break
else: # didn't find axis at all.
raise AxisNotFound(axisNum)
# end outer loop over combos.
# Convert the current WcsAxisSpecs object to a WCSAxisSimple object:
#
wcsAxis = WCSAxisSimple(wcsAxisSpecs.ctype, wcsAxisSpecs.refInPixels\
, wcsAxisSpecs.refInWorld, pixelDeltaWorld, wcsAxisSpecs.worldUnit\
, wcsAxisSpecs.axisNumber, wcsAxisSpecs.extensionType)
return wcsAxis
def getJointWithAxis(self, axisNum):
"""Returns a WCSAxesJoint object provided that (i) that axis number exists in the FitsHeaderWCS; (ii) it is part of a joint-transform combination with another axis."""
for wcsCombo in self._listOfCombos:
numAxes = len(wcsCombo._wcsAxisList)
for j in range(numAxes):
wcsAxisSpecs = wcsCombo._wcsAxisList[j]
if wcsAxisSpecs.axisNumber==axisNum:
if numAxes<=1:
raise AxisIsNotInJoint(axisNum)
else:
break # success. Bust out of these loops, carrying wcsCombo.
# end if wcsAxisSpecs.axisNumber==axisNum
else: # didn't find the axis in this combo.
continue # do next combo.
# end inner loop over axes this combo.
# If got to here, means the axis was found, and it was in a combo: so we broke out of the inner loop. So break out of the outer one too - nothing more to do.
break
else: # didn't find axis at all.
raise AxisNotFound(axisNum)
# end outer loop over combos.
return wcsCombo
def addElement(self, element):
if element.__class__==WCSAxisSimple:
axis = WCSAxis(element.ctype, element.refInPixels\
, element.refInWorld, element.worldUnit, element.axisNumber\
, self.extensionType)
pixToWorldMatrix = nu.array([[element._pixelDeltaWorld]])
self._listOfCombos.append(WCSAxesJoint([axis], pixToWorldMatrix, self.extensionType))
elif element.__class__==WCSAxesJoint:
element.extensionType = self.extensionType
element.setExtTypeOfAxes()
self._listOfCombos.append(element)
else:
raise ex.UnrecognizedChoiceObject(element.__class__.__name__)
def compileListOfKwds(self):
listOfKwds = []
for wcsCombo in self._listOfCombos:
numAxes = len(wcsCombo._wcsAxisList)
if numAxes>1:
listOfKwds += wcsCombo.compileListOfKwds()
else:
wcsAxisSpecs = wcsCombo._wcsAxisList[0]
pixelDeltaWorld = float(wcsCombo._pixToWorldMatrix[0,0])
wcsAxis = WCSAxisSimple(wcsAxisSpecs.ctype, wcsAxisSpecs.refInPixels\
, wcsAxisSpecs.refInWorld, pixelDeltaWorld, wcsAxisSpecs.worldUnit\
, wcsAxisSpecs.axisNumber, wcsAxisSpecs.extensionType)
listOfKwds += wcsAxis.compileListOfKwds()
return listOfKwds
def writeToHeader(self, hdu, adjustExtType=True):
extensionType = readExtType(hdu)
if extensionType!=self.extensionType:
if adjustExtType:
self.extensionType = extensionType
for element in self._listOfCombos:
element.extensionType = extensionType
element.setExtTypeOfAxes()
else:
raise MismatchedExtensionType(extensionType, self.extensionType)
listOfKwds = self.compileListOfKwds()
for kwd in listOfKwds:
if str(kwd['value'])=='':
continue
hdu.header.update(kwd['name'], kwd['value'])
##### also write comments??
return hdu
def __str__(self, spaces=''):
returnedStr = spaces+'<FitsHeaderWCS object.\n'
returnedStr += spaces+' Extension type: %s\n' % (self.extensionType)
for wcsCombo in self._listOfCombos:
numAxes = len(wcsCombo._wcsAxisList)
if numAxes>1:
returnedStr += spaces+wcsCombo.__str__(spaces+' ')+'\n'
else:
wcsAxisSpecs = wcsCombo._wcsAxisList[0]
pixelDeltaWorld = float(wcsCombo._pixToWorldMatrix[0,0])
wcsAxis = WCSAxisSimple(wcsAxisSpecs.ctype, wcsAxisSpecs.refInPixels\
, wcsAxisSpecs.refInWorld, pixelDeltaWorld, wcsAxisSpecs.worldUnit\
, wcsAxisSpecs.axisNumber, wcsAxisSpecs.extensionType)
returnedStr += spaces+wcsAxis.__str__(spaces+' ')+'\n'
return returnedStr+spaces+'>'
def copy(self):
listOfElements = []
for wcsCombo in self._listOfCombos:
numAxes = len(wcsCombo._wcsAxisList)
if numAxes>1:
listOfElements.append(wcsCombo.copy())
else:
wcsAxisSpecs = wcsCombo._wcsAxisList[0]
pixelDeltaWorld = float(wcsCombo._pixToWorldMatrix[0,0])
wcsAxis = WCSAxisSimple(wcsAxisSpecs.ctype, wcsAxisSpecs.refInPixels\
, wcsAxisSpecs.refInWorld, pixelDeltaWorld, wcsAxisSpecs.worldUnit\
, wcsAxisSpecs.axisNumber, wcsAxisSpecs.extensionType)
listOfElements.append(wcsAxis.copy())
newTotalWcs = FitsHeaderWCS(self.extensionType, listOfElements)
return newTotalWcs
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
if __name__ == '__main__':
pass
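#.......................................................................
# Hypothetical usage sketch (added for illustration; not part of the original
# module). It round-trips a coordinate through a single simple axis, assuming
# the WCSAxisSimple constructor above accepts these example values and that
# numpy is imported as `nu` as elsewhere in this module.
if __name__ == '__main__':
    freqAxis = WCSAxisSimple('FREQ', refInPixels=1.0, refInWorld=1.4e9\
      , pixelDeltaWorld=1.0e6, worldUnit='Hz', axisNumber=1)
    worldValue = freqAxis.pixelToWorld(10.0) # expect 1.4e9 + 9*1.0e6
    pixelValue = freqAxis.worldToPixel(worldValue) # expect 10.0 back again.
    print(worldValue, pixelValue)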
|
PypiClean
|
/tableschema-bigquery-1.0.1.tar.gz/tableschema-bigquery-1.0.1/tableschema_bigquery/storage.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import six
import time
import unicodecsv
import tableschema
from apiclient.http import MediaIoBaseUpload
from .mapper import Mapper
# Module API
class Storage(tableschema.Storage):
"""BigQuery storage
Package implements
[Tabular Storage](https://github.com/frictionlessdata/tableschema-py#storage)
interface (see full documentation on the link):
> Only additional API is documented
# Arguments
service (object): BigQuery `Service` object
project (str): BigQuery project name
dataset (str): BigQuery dataset name
prefix (str): prefix for all buckets
"""
# Public
def __init__(self, service, project, dataset, prefix=''):
# Set attributes
self.__service = service
self.__project = project
self.__dataset = dataset
self.__prefix = prefix
self.__buckets = None
self.__descriptors = {}
self.__fallbacks = {}
# Create mapper
self.__mapper = Mapper(prefix=prefix)
def __repr__(self):
# Template and format
template = 'Storage <{service}/{project}-{dataset}>'
text = template.format(
service=self.__service,
project=self.__project,
dataset=self.__dataset)
return text
@property
def buckets(self):
# No cached value
if self.__buckets is None:
# Get response
response = self.__service.tables().list(
projectId=self.__project,
datasetId=self.__dataset).execute()
# Extract buckets
self.__buckets = []
for table in response.get('tables', []):
table_name = table['tableReference']['tableId']
bucket = self.__mapper.restore_bucket(table_name)
if bucket is not None:
self.__buckets.append(bucket)
return self.__buckets
def create(self, bucket, descriptor, force=False):
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
descriptors = descriptor
if isinstance(descriptor, dict):
descriptors = [descriptor]
# Iterate over buckets/descriptors
for bucket, descriptor in zip(buckets, descriptors):
# Existent bucket
if bucket in self.buckets:
if not force:
message = 'Bucket "%s" already exists' % bucket
raise tableschema.exceptions.StorageError(message)
self.delete(bucket)
# Prepare job body
tableschema.validate(descriptor)
table_name = self.__mapper.convert_bucket(bucket)
converted_descriptor, fallbacks = self.__mapper.convert_descriptor(descriptor)
body = {
'tableReference': {
'projectId': self.__project,
'datasetId': self.__dataset,
'tableId': table_name,
},
'schema': converted_descriptor,
}
# Make request
self.__service.tables().insert(
projectId=self.__project,
datasetId=self.__dataset,
body=body).execute()
# Add to descriptors/fallbacks
self.__descriptors[bucket] = descriptor
self.__fallbacks[bucket] = fallbacks
# Remove buckets cache
self.__buckets = None
def delete(self, bucket=None, ignore=False):
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
elif bucket is None:
buckets = reversed(self.buckets)
# Iterate over buckets
for bucket in buckets:
# Non-existent bucket
if bucket not in self.buckets:
if not ignore:
message = 'Bucket "%s" doesn\'t exist.' % bucket
raise tableschema.exceptions.StorageError(message)
return
# Remove from descriptors
if bucket in self.__descriptors:
del self.__descriptors[bucket]
# Make delete request
table_name = self.__mapper.convert_bucket(bucket)
self.__service.tables().delete(
projectId=self.__project,
datasetId=self.__dataset,
tableId=table_name).execute()
# Remove tables cache
self.__buckets = None
def describe(self, bucket, descriptor=None):
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor
# Get descriptor
else:
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table_name = self.__mapper.convert_bucket(bucket)
response = self.__service.tables().get(
projectId=self.__project,
datasetId=self.__dataset,
tableId=table_name).execute()
converted_descriptor = response['schema']
descriptor = self.__mapper.restore_descriptor(converted_descriptor)
return descriptor
def iter(self, bucket):
# Get schema/data
schema = tableschema.Schema(self.describe(bucket))
table_name = self.__mapper.convert_bucket(bucket)
response = self.__service.tabledata().list(
projectId=self.__project,
datasetId=self.__dataset,
tableId=table_name).execute()
# Collect rows
rows = []
for fields in response['rows']:
row = [field['v'] for field in fields['f']]
rows.append(row)
# Sort rows
# TODO: provide proper sorting solution
rows = sorted(rows, key=lambda row: row[0] if row[0] is not None else 'null')
# Emit rows
for row in rows:
row = self.__mapper.restore_row(row, schema=schema)
yield row
def read(self, bucket):
rows = list(self.iter(bucket))
return rows
def write(self, bucket, rows):
# Write buffer
BUFFER_SIZE = 10000
# Prepare schema, fallbacks
schema = tableschema.Schema(self.describe(bucket))
fallbacks = self.__fallbacks.get(bucket, [])
# Write data
rows_buffer = []
for row in rows:
row = self.__mapper.convert_row(row, schema=schema, fallbacks=fallbacks)
rows_buffer.append(row)
if len(rows_buffer) > BUFFER_SIZE:
self.__write_rows_buffer(bucket, rows_buffer)
rows_buffer = []
if len(rows_buffer) > 0:
self.__write_rows_buffer(bucket, rows_buffer)
# Private
def __write_rows_buffer(self, bucket, rows_buffer):
# Process data to byte stream csv
bytes = io.BufferedRandom(io.BytesIO())
writer = unicodecsv.writer(bytes, encoding='utf-8')
for row in rows_buffer:
writer.writerow(row)
bytes.seek(0)
# Prepare job body
table_name = self.__mapper.convert_bucket(bucket)
body = {
'configuration': {
'load': {
'destinationTable': {
'projectId': self.__project,
'datasetId': self.__dataset,
'tableId': table_name
},
'sourceFormat': 'CSV',
}
}
}
# Prepare job media body
mimetype = 'application/octet-stream'
media_body = MediaIoBaseUpload(bytes, mimetype=mimetype)
# Make request to Big Query
response = self.__service.jobs().insert(
projectId=self.__project,
body=body,
media_body=media_body).execute()
self.__wait_response(response)
def __wait_response(self, response):
# Get job instance
job = self.__service.jobs().get(
projectId=response['jobReference']['projectId'],
jobId=response['jobReference']['jobId'])
# Wait done
while True:
result = job.execute(num_retries=1)
if result['status']['state'] == 'DONE':
if result['status'].get('errors'):
errors = result['status']['errors']
message = '\n'.join(error['message'] for error in errors)
raise tableschema.exceptions.StorageError(message)
break
time.sleep(1)
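# Hypothetical usage sketch (illustrative only; not part of the original
# module). The project/dataset names below are placeholders, and building the
# `service` object assumes google-api-python-client plus application default
# credentials are available:
#
#   from apiclient import discovery
#   from oauth2client.client import GoogleCredentials
#
#   credentials = GoogleCredentials.get_application_default()
#   service = discovery.build('bigquery', 'v2', credentials=credentials)
#   storage = Storage(service, project='my-project', dataset='my_dataset')
#   storage.create('articles', {'fields': [{'name': 'id', 'type': 'integer'}]})
#   storage.write('articles', [[1], [2]])
#   print(storage.read('articles'))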
|
PypiClean
|
/git-deps-1.1.0.zip/git-deps-1.1.0/git_deps/html/node_modules/which-typed-array/README.md
|
# which-typed-array <sup>[![Version Badge][2]][1]</sup>
[![Build Status][3]][4]
[![dependency status][5]][6]
[![dev dependency status][7]][8]
[![License][license-image]][license-url]
[![Downloads][downloads-image]][downloads-url]
[![npm badge][11]][1]
Which kind of Typed Array is this JavaScript value? Works cross-realm, without `instanceof`, and despite Symbol.toStringTag.
## Example
```js
var whichTypedArray = require('which-typed-array');
var assert = require('assert');
assert.equal(false, whichTypedArray(undefined));
assert.equal(false, whichTypedArray(null));
assert.equal(false, whichTypedArray(false));
assert.equal(false, whichTypedArray(true));
assert.equal(false, whichTypedArray([]));
assert.equal(false, whichTypedArray({}));
assert.equal(false, whichTypedArray(/a/g));
assert.equal(false, whichTypedArray(new RegExp('a', 'g')));
assert.equal(false, whichTypedArray(new Date()));
assert.equal(false, whichTypedArray(42));
assert.equal(false, whichTypedArray(NaN));
assert.equal(false, whichTypedArray(Infinity));
assert.equal(false, whichTypedArray(new Number(42)));
assert.equal(false, whichTypedArray('foo'));
assert.equal(false, whichTypedArray(Object('foo')));
assert.equal(false, whichTypedArray(function () {}));
assert.equal(false, whichTypedArray(function* () {}));
assert.equal(false, whichTypedArray(x => x * x));
assert.equal(false, whichTypedArray([]));
assert.equal('Int8Array', whichTypedArray(new Int8Array()));
assert.equal('Uint8Array', whichTypedArray(new Uint8Array()));
assert.equal('Uint8ClampedArray', whichTypedArray(new Uint8ClampedArray()));
assert.equal('Int16Array', whichTypedArray(new Int16Array()));
assert.equal('Uint16Array', whichTypedArray(new Uint16Array()));
assert.equal('Int32Array', whichTypedArray(new Int32Array()));
assert.equal('Uint32Array', whichTypedArray(new Uint32Array()));
assert.equal('Float32Array', whichTypedArray(new Float32Array()));
assert.equal('Float64Array', whichTypedArray(new Float64Array()));
assert.equal('BigInt64Array', whichTypedArray(new BigInt64Array()));
assert.equal('BigUint64Array', whichTypedArray(new BigUint64Array()));
```
## Tests
Simply clone the repo, `npm install`, and run `npm test`
[1]: https://npmjs.org/package/which-typed-array
[2]: http://versionbadg.es/inspect-js/which-typed-array.svg
[3]: https://travis-ci.org/inspect-js/which-typed-array.svg
[4]: https://travis-ci.org/inspect-js/which-typed-array
[5]: https://david-dm.org/inspect-js/which-typed-array.svg
[6]: https://david-dm.org/inspect-js/which-typed-array
[7]: https://david-dm.org/inspect-js/which-typed-array/dev-status.svg
[8]: https://david-dm.org/inspect-js/which-typed-array#info=devDependencies
[11]: https://nodei.co/npm/which-typed-array.png?downloads=true&stars=true
[license-image]: http://img.shields.io/npm/l/which-typed-array.svg
[license-url]: LICENSE
[downloads-image]: http://img.shields.io/npm/dm/which-typed-array.svg
[downloads-url]: http://npm-stat.com/charts.html?package=which-typed-array
|
PypiClean
|
/microcosm-pubsub-2.28.0.tar.gz/microcosm-pubsub-2.28.0/microcosm_pubsub/result.py
|
from dataclasses import dataclass, field
from enum import Enum, unique
from logging import DEBUG, INFO, WARNING
from sys import exc_info
from typing import (
Any,
Dict,
Optional,
Tuple,
)
from microcosm.opaque import Opaque
from microcosm_pubsub.errors import (
IgnoreMessage,
Nack,
SkipMessage,
TTLExpired,
)
from microcosm_pubsub.message import SQSMessage
from microcosm_pubsub.sentry import SentryConfigPubsub
@dataclass
class MessageHandlingResultTypeInfo:
name: str
level: int
retry: bool = False
exc_info: bool = False
@unique
class MessageHandlingResultType(Enum):
# Messaging handling aborted due to too many attempts.
EXPIRED = MessageHandlingResultTypeInfo(name="EXPIRED", level=WARNING)
# Messaging handling failed.
FAILED = MessageHandlingResultTypeInfo(name="FAILED", level=WARNING, exc_info=True, retry=True)
# Message was not handled.
IGNORED = MessageHandlingResultTypeInfo(name="IGNORED", level=DEBUG)
# Messaging handling was intentionally retried.
RETRIED = MessageHandlingResultTypeInfo(name="RETRIED", level=INFO, retry=True)
# Message handling succeeded.
SUCCEEDED = MessageHandlingResultTypeInfo(name="SUCCEEDED", level=INFO)
# Message handling was skipped.
# ("Upon closer inspection these are loafers")
SKIPPED = MessageHandlingResultTypeInfo(name="SKIPPED", level=INFO)
def __str__(self):
return self.name
@property
def exc_info(self):
return self.value.exc_info
@property
def level(self):
return self.value.level
@property
def retry(self):
return self.value.retry
@dataclass
class MessageHandlingResult:
media_type: str
result: MessageHandlingResultType
exc_info: Optional[Tuple[Any, Any, Any]] = None
extra: Dict[str, str] = field(default_factory=dict)
elapsed_time: Optional[float] = None
handle_start_time: Optional[float] = None
retry_timeout_seconds: Optional[int] = None
@classmethod
def invoke(cls, handler, message: SQSMessage):
try:
success = handler(message.content)
return cls.from_result(message, bool(success))
except Exception as error:
return cls.from_error(message, error)
@classmethod
def from_result(cls, message: SQSMessage, success: bool):
if success:
result = MessageHandlingResultType.SUCCEEDED
else:
result = MessageHandlingResultType.SKIPPED
return cls(
media_type=message.media_type,
result=result,
)
@classmethod
def from_error(cls, message: SQSMessage, error: Exception, **kwargs):
if isinstance(error, IgnoreMessage):
return cls(
extra=error.extra,
media_type=message.media_type,
result=MessageHandlingResultType.IGNORED,
)
if isinstance(error, SkipMessage):
return cls(
extra=dict(
reason=str(error),
**error.extra
),
media_type=message.media_type,
result=MessageHandlingResultType.SKIPPED,
)
if isinstance(error, TTLExpired):
return cls(
extra=error.extra,
media_type=message.media_type,
result=MessageHandlingResultType.EXPIRED,
)
if isinstance(error, Nack):
return cls(
extra=dict(
reason=str(error),
**error.extra
),
media_type=message.media_type,
result=MessageHandlingResultType.RETRIED,
retry_timeout_seconds=error.visibility_timeout_seconds,
)
return cls(
exc_info=exc_info(),
media_type=message.media_type,
result=MessageHandlingResultType.FAILED,
)
def log(self, logger, opaque):
"""
Log this result.
"""
entry = f"Result for media type: {self.media_type} was : {self.result} "
logger.log(
self.result.level,
entry,
exc_info=self.exc_info,
extra={
"media_type": self.media_type,
**opaque.as_dict(),
**self.extra,
},
)
def error_reporting(self, sentry_config: SentryConfigPubsub, opaque: Opaque) -> None:
if not all([
sentry_config.enabled,
self.result in [
MessageHandlingResultType.FAILED,
MessageHandlingResultType.EXPIRED,
],
self.exc_info,
]):
return
self._report_error(opaque, sentry_config.tag_mapping, sentry_config.user_id_key)
def _report_error(self, opaque, tag_mapping, user_id_key):
from sentry_sdk import capture_exception
from sentry_sdk import configure_scope
opaque = opaque.as_dict()
with configure_scope() as scope:
scope.user = {"id": opaque.get(user_id_key)}
for opaque_key, tag_key in tag_mapping.items():
scope.set_tag(tag_key, opaque.get(opaque_key))
capture_exception(self.exc_info, scope=scope)
def resolve(self, message):
if self.result.retry:
message.nack(self.retry_timeout_seconds)
else:
message.ack()
return self
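# Hypothetical usage sketch (illustrative only; not part of the original
# module). Given a handler callable, an `SQSMessage`, and the daemon's logger
# and opaque context, a consumer loop would typically chain the helpers above:
#
#   result = MessageHandlingResult.invoke(handler, message)
#   result.log(logger, opaque)
#   result.resolve(message)  # acks the message, or nacks it when a retry is wanted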
|
PypiClean
|
/pulumi_azure_nextgen-0.6.2a1613157620.tar.gz/pulumi_azure_nextgen-0.6.2a1613157620/pulumi_azure_nextgen/network/v20150501preview/get_security_rule.py
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetSecurityRuleResult',
'AwaitableGetSecurityRuleResult',
'get_security_rule',
]
@pulumi.output_type
class GetSecurityRuleResult:
"""
Network security rule
"""
def __init__(__self__, access=None, description=None, destination_address_prefix=None, destination_port_range=None, direction=None, etag=None, id=None, name=None, priority=None, protocol=None, provisioning_state=None, source_address_prefix=None, source_port_range=None):
if access and not isinstance(access, str):
raise TypeError("Expected argument 'access' to be a str")
pulumi.set(__self__, "access", access)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if destination_address_prefix and not isinstance(destination_address_prefix, str):
raise TypeError("Expected argument 'destination_address_prefix' to be a str")
pulumi.set(__self__, "destination_address_prefix", destination_address_prefix)
if destination_port_range and not isinstance(destination_port_range, str):
raise TypeError("Expected argument 'destination_port_range' to be a str")
pulumi.set(__self__, "destination_port_range", destination_port_range)
if direction and not isinstance(direction, str):
raise TypeError("Expected argument 'direction' to be a str")
pulumi.set(__self__, "direction", direction)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if priority and not isinstance(priority, int):
raise TypeError("Expected argument 'priority' to be a int")
pulumi.set(__self__, "priority", priority)
if protocol and not isinstance(protocol, str):
raise TypeError("Expected argument 'protocol' to be a str")
pulumi.set(__self__, "protocol", protocol)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if source_address_prefix and not isinstance(source_address_prefix, str):
raise TypeError("Expected argument 'source_address_prefix' to be a str")
pulumi.set(__self__, "source_address_prefix", source_address_prefix)
if source_port_range and not isinstance(source_port_range, str):
raise TypeError("Expected argument 'source_port_range' to be a str")
pulumi.set(__self__, "source_port_range", source_port_range)
@property
@pulumi.getter
def access(self) -> str:
"""
Gets or sets whether network traffic is allowed or denied. Possible values are 'Allow' and 'Deny'
"""
return pulumi.get(self, "access")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Gets or sets a description for this rule. Restricted to 140 chars.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="destinationAddressPrefix")
def destination_address_prefix(self) -> str:
"""
Gets or sets destination address prefix. CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
"""
return pulumi.get(self, "destination_address_prefix")
@property
@pulumi.getter(name="destinationPortRange")
def destination_port_range(self) -> Optional[str]:
"""
Gets or sets Destination Port or Range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "destination_port_range")
@property
@pulumi.getter
def direction(self) -> str:
"""
Gets or sets the direction of the rule: InBound or Outbound. The direction specifies if the rule will be evaluated on incoming or outgoing traffic.
"""
return pulumi.get(self, "direction")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def priority(self) -> Optional[int]:
"""
Gets or sets the priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter
def protocol(self) -> str:
"""
Gets or sets Network protocol this rule applies to. Can be Tcp, Udp or All(*).
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="sourceAddressPrefix")
def source_address_prefix(self) -> str:
"""
Gets or sets source address prefix. CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
"""
return pulumi.get(self, "source_address_prefix")
@property
@pulumi.getter(name="sourcePortRange")
def source_port_range(self) -> Optional[str]:
"""
Gets or sets Source Port or Range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
"""
return pulumi.get(self, "source_port_range")
class AwaitableGetSecurityRuleResult(GetSecurityRuleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSecurityRuleResult(
access=self.access,
description=self.description,
destination_address_prefix=self.destination_address_prefix,
destination_port_range=self.destination_port_range,
direction=self.direction,
etag=self.etag,
id=self.id,
name=self.name,
priority=self.priority,
protocol=self.protocol,
provisioning_state=self.provisioning_state,
source_address_prefix=self.source_address_prefix,
source_port_range=self.source_port_range)
def get_security_rule(network_security_group_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
security_rule_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecurityRuleResult:
"""
Use this data source to access information about an existing resource.
:param str network_security_group_name: The name of the network security group.
:param str resource_group_name: The name of the resource group.
:param str security_rule_name: The name of the security rule.
"""
__args__ = dict()
__args__['networkSecurityGroupName'] = network_security_group_name
__args__['resourceGroupName'] = resource_group_name
__args__['securityRuleName'] = security_rule_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20150501preview:getSecurityRule', __args__, opts=opts, typ=GetSecurityRuleResult).value
return AwaitableGetSecurityRuleResult(
access=__ret__.access,
description=__ret__.description,
destination_address_prefix=__ret__.destination_address_prefix,
destination_port_range=__ret__.destination_port_range,
direction=__ret__.direction,
etag=__ret__.etag,
id=__ret__.id,
name=__ret__.name,
priority=__ret__.priority,
protocol=__ret__.protocol,
provisioning_state=__ret__.provisioning_state,
source_address_prefix=__ret__.source_address_prefix,
source_port_range=__ret__.source_port_range)
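# Hypothetical usage sketch (illustrative only; not part of the original
# module). The resource names below are placeholders for an existing network
# security group, resource group and rule:
#
#   rule = get_security_rule(
#       network_security_group_name='my-nsg',
#       resource_group_name='my-rg',
#       security_rule_name='allow-ssh')
#   pulumi.export('allowSshAccess', rule.access)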
|
PypiClean
|
/django-treeadmin-fork-alt-storage-0.5.tar.gz/django-treeadmin-fork-alt-storage-0.5/docs/index.rst
|
django-treeadmin
================
django-treeadmin is a complement to models that are `django-mptt`_-enabled.
It provides a drag-&-drop interface to modify the tree.
It was originally developed as part of `FeinCMS`_. They deserve all the kudos
for this.
.. _django-mptt: https://github.com/django-mptt/django-mptt
.. _FeinCMS: http://www.feinheit.ch/media/labs/feincms/
Prerequisites
-------------
If you want to use django-treeadmin, make sure that the following points are fulfilled:
* your project uses ``staticfiles``, either through Django's
:mod:`django.contrib.staticfiles` (part of Django since 1.3) or
`django-staticfiles`_.
* your model uses ``django-mptt``
.. _django-staticfiles: https://github.com/jezdez/django-staticfiles
Usage
-----
To activate the treeadmin, inherit from :class:`treeadmin.admin.TreeAdmin` in
your ``admin.py``::
from django.contrib import admin
from treeadmin.admin import TreeAdmin
from myapp.models import MyModel
class MyModelAdmin(TreeAdmin):
pass
admin.site.register(MyModel, MyModelAdmin)
.. warning::
If your project uses `johnny-cache`_, make sure to inherit from
:class:`JohnnyCacheAwareTreeAdmin` instead of
:class:`~treeadmin.admin.TreeAdmin`. It tries to get around some cache
invalidation problems that occur when django-mptt is used together with
johnny-cache.
If the state of the tree is mission-critical for your project, it's
probably best to add its table to ``JOHNNY_TABLE_BLACKLIST``. Please
refer to johnny-cache's documentation for more information.
.. _johnny-cache: http://packages.python.org/johnny-cache/
``TreeAdmin`` Options
---------------------
.. class:: treeadmin.admin.TreeAdmin
The behaviour of the tree admin can be influenced with a couple of class
attributes:
.. attribute:: TreeAdmin.filter_include_ancestors
Controls if ancestors should be displayed on a filtered list
By default, it is set to ``False``.
.. note::
This corresponds to the ``FEINCMS_TREE_EDITOR_INCLUDE_ANCESTORS``
setting in FeinCMS.
.. attribute:: TreeAdmin.enable_object_permissions
If set to ``True``, permission checks will be made on the object level.
Make sure to have an authentication backend that supports object level
permissions, or weird things will happen.
By default, it is set to ``False``.
.. note::
This corresponds to the ``FEINCMS_TREE_EDITOR_OBJECT_PERMISSIONS``
setting in FeinCMS.
.. attribute:: TreeAdmin.jquery_use_google_cdn
If set to ``True``, jQuery and jQuery UI are loaded from Google's CDN.
By default, it is set to ``False``.
.. note::
This corresponds to the ``FEINCMS_ADMIN_MEDIA_HOTLINKING`` setting
in FeinCMS.
.. attribute:: TreeAdmin.jquery_no_conflict
If set to ``True``, loads jQuery in the ``noconflict`` mode.
By default, it is set to ``False``.
.. note::
This corresponds to the ``FEINCMS_JQUERY_NO_CONFLICT`` setting in
FeinCMS.
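As a quick illustration, the options above are plain class attributes that can
be overridden on your ``TreeAdmin`` subclass (a minimal sketch; adjust to your
own models and needs)::

    from treeadmin.admin import TreeAdmin

    class MyModelAdmin(TreeAdmin):
        filter_include_ancestors = True
        enable_object_permissions = False
        jquery_use_google_cdn = True
        jquery_no_conflict = False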
|
PypiClean
|
/deepvision-toolkit-0.1.6.tar.gz/deepvision-toolkit-0.1.6/deepvision/evaluation/feature_analysis.py
|
import matplotlib.pyplot as plt
import tensorflow as tf
import torch
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
class FeatureAnalyzer:
def __init__(
self,
model,
dataset,
backend,
classnames=None,
random_state=42,
limit_batches=-1,
):
self.model = model
self.dataset = dataset
self.backend = backend
self.limit_batches = len(dataset) if limit_batches == -1 else limit_batches
self.random_state = random_state
if limit_batches > len(dataset):
raise ValueError(
f"`limit_batches` is set to a higher number than there are batches in your dataset."
)
self.classnames = classnames
self.all_features = None
self.all_classes = None
def process_dataset_tf(self):
all_features = []
all_classes = []
if self.limit_batches > -1:
dataset = self.dataset.take(self.limit_batches)
else:
dataset = self.dataset
for index, batch in enumerate(dataset):
print(f"Processing batch {index}/{len(self.dataset)}", end="\r")
images, labels = batch
features = self.model(images)
# If the output is a `dict` with an `output`
# key, such as for Functional Subclassing models
# extract the `'output'` key, that all DeepVision models support.
# Else - take the `tf.Tensor` output.
if isinstance(features, dict):
features = features["output"]
all_features.append(features)
all_classes.append(labels)
print(f"\nProcessing finished. Extracting features and classes...")
all_classes_tf = tf.stack(all_classes)
all_classes_tf = tf.reshape(all_classes_tf, -1)
all_features_tf = tf.stack(all_features)
all_features_tf = tf.reshape(
all_features_tf, shape=(all_classes_tf.shape[0], -1)
)
if self.classnames is None:
# tf.unique() returns a tuple of unique values and indices
classnames, idx = tf.unique(all_classes_tf)
classnames = classnames.numpy()
self.classnames = classnames
all_features = all_features_tf.numpy()
all_classes = all_classes_tf.numpy()
self.all_features = all_features
self.all_classes = all_classes
def process_dataset_pt(self):
all_features = []
all_classes = []
with torch.no_grad():
for index, batch in enumerate(self.dataset):
if index >= self.limit_batches:
break
print(f"Processing batch {index}/{len(self.dataset)}", end="\r")
images, labels = batch
images = images.to(self.model.device)
labels = labels.to(self.model.device)
features = self.model(images)
all_features.append(features)
all_classes.append(labels)
print(f"\nProcessing finished. Extracting features and classes...")
all_classes_torch = torch.stack(all_classes)
all_classes_torch = all_classes_torch.flatten()
all_features_torch = torch.stack(all_features).reshape(
all_classes_torch.shape[0], -1
)
if self.classnames is None:
classnames = torch.unique(all_classes_torch).detach().cpu().numpy()
self.classnames = classnames
all_features = all_features_torch.detach().cpu().numpy()
all_classes = all_classes_torch.detach().cpu().numpy()
self.all_features = all_features
self.all_classes = all_classes
def extract_features(self):
if self.backend == "pytorch":
self.process_dataset_pt()
else:
self.process_dataset_tf()
print(
"Features extracted. You can now visualize them or perform analysis without re-running the extraction."
)
def feature_analysis(
self,
components,
figsize=(10, 10),
tsne_verbose=1,
perplexity=75,
n_iter=1000,
legend=True,
):
if self.all_classes is None or self.all_features is None:
raise ValueError(
f"Features and classes are None. Did you forget to call `extract_features()` first?"
)
print(f"Principal component analysis...")
pca = PCA(n_components=components, random_state=self.random_state)
features_pca = pca.fit_transform(self.all_features)
tsne = TSNE(
n_components=components,
verbose=tsne_verbose,
perplexity=perplexity,
n_iter=n_iter,
random_state=self.random_state,
metric="euclidean",
)
features_tsne = tsne.fit_transform(features_pca)
if components == 3:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(121, projection="3d")
ax.set_title("Learned Feature PCA")
for class_id, classname in enumerate(self.classnames):
ax.scatter(
features_pca[:, 0][self.all_classes == class_id],
features_pca[:, 1][self.all_classes == class_id],
features_pca[:, 2][self.all_classes == class_id],
label=classname,
alpha=0.4,
)
if legend:
ax.legend()
ax = fig.add_subplot(122, projection="3d")
ax.set_title("Learned Feature t-Stochastic Neighbor Embeddings")
for class_id, classname in enumerate(self.classnames):
ax.scatter(
features_tsne[:, 0][self.all_classes == class_id],
features_tsne[:, 1][self.all_classes == class_id],
features_tsne[:, 2][self.all_classes == class_id],
label=classname,
alpha=0.4,
)
if legend:
ax.legend()
else:
fig, ax = plt.subplots(2, figsize=figsize)
ax[0].set_title("Learned Feature PCA")
ax[1].set_title("Learned Feature t-Stochastic Neighbor Embeddings")
for class_id, classname in enumerate(self.classnames):
ax[0].scatter(
features_pca[:, 0][self.all_classes == class_id],
features_pca[:, 1][self.all_classes == class_id],
label=classname,
alpha=0.4,
)
ax[1].scatter(
features_tsne[:, 0][self.all_classes == class_id],
features_tsne[:, 1][self.all_classes == class_id],
label=classname,
alpha=0.4,
)
if legend:
ax[0].legend()
ax[1].legend()
plt.show()
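# Hypothetical usage sketch (illustrative only; not part of the original
# module). `model` and `dataset` stand in for a trained feature extractor and
# an iterable of (images, labels) batches matching the chosen backend:
#
#   analyzer = FeatureAnalyzer(model, dataset, backend="pytorch", limit_batches=10)
#   analyzer.extract_features()
#   analyzer.feature_analysis(components=2, perplexity=30)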
|
PypiClean
|
/guacamole-0.9.2.tar.gz/guacamole-0.9.2/docs/usage/concepts.rst
|
.. _bundled_ingredients:
Recipes, Ingredients and Spices
===============================
Guacamole is a framework for creating command line applications. To understand
how to use it, you need to know about the three concepts (recipes, ingredients
and spices). They define how guacamole works (tastes) and they are how you can
make guacamole work for you in new and interesting ways.
Ingredients
-----------
Ingredients are pluggable components that can be added to a guacamole recipe.
They have well-defined APIs and are invoked by guacamole during the lifetime of
the application. You can think of ingredients as middleware or a fancy
context manager. For in-depth documentation see the
:class:`~guacamole.core.Ingredient` class. For a list of bundled ingredients
(batteries included) please see `bundled-ingredients`.
**Guacamole uses ingredients to avoid having a complex, convoluted core. The core
literally does nothing more than invoke all ingredients in a given order.
Applications use ingredients indirectly, through recipes.**
Spices
------
Spices are small, optional bits of taste that can be added along with a given
ingredient. They are just a feature flag with a fancy name. You will see spices
documented along with each ingredient. For many features you will use the sane
defaults that guacamole aims to provide but sometimes you may want to tweak
something. Such elements can be hidden behind an ingredient.
**Guacamole uses spices to offer fixed customizability where it makes sense to
do so. Applications say which spices they wish to use. Spices always enable
non-default behavior.**
Recipes
-------
Recipes define the sequence of ingredients to use for a tasty guacamole. In
reality a recipe is a simple function that returns a list of ingredient
instances to use in a given application.
**Guacamole uses recipes to offer easy-to-use, well-designed patterns for
creating applications. Anyone can create a recipe that uses a set of
ingredients that fit a particular purpose.**
Command?
--------
The :class:`~guacamole.recipes.cmd.Command` class is just a recipe that uses a
set of ingredients. As Guacamole matures, other recipes may be added.
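For illustration, a minimal application built on that recipe might look like
this (a sketch, assuming the standard ``guacamole.Command`` API)::

    import guacamole

    class HelloWorld(guacamole.Command):
        """Say hello to the world."""

        def invoked(self, ctx):
            print("Hello world!")

    if __name__ == '__main__':
        HelloWorld().main()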
|
PypiClean
|
/pytest-pingguo-pytest-plugin-0.1.0.tar.gz/pytest-pingguo-pytest-plugin-0.1.0/README.rst
|
============================
pytest-pingguo-pytest-plugin
============================
.. image:: https://img.shields.io/pypi/v/pytest-pingguo-pytest-plugin.svg
:target: https://pypi.org/project/pytest-pingguo-pytest-plugin
:alt: PyPI version
.. image:: https://img.shields.io/pypi/pyversions/pytest-pingguo-pytest-plugin.svg
:target: https://pypi.org/project/pytest-pingguo-pytest-plugin
:alt: Python versions
.. image:: https://ci.appveyor.com/api/projects/status/github/wessonlan/pytest-pingguo-pytest-plugin?branch=master
:target: https://ci.appveyor.com/project/wessonlan/pytest-pingguo-pytest-plugin/branch/master
:alt: See Build Status on AppVeyor
pingguo test
------------
This `pytest`_ plugin was generated with `Cookiecutter`_ along with `@hackebrot`_'s `cookiecutter-pytest-plugin`_ template.
Features
--------
* mytest
Requirements
------------
* mytest
Installation
------------
You can install "pytest-pingguo-pytest-plugin" via `pip`_ from `PyPI`_::
$ pip install pytest-pingguo-pytest-plugin
Usage
-----
* mytest
Contributing
------------
Contributions are very welcome. Tests can be run with `tox`_; please ensure
the coverage at least stays the same before you submit a pull request.
License
-------
Distributed under the terms of the `Mozilla Public License 2.0`_ license, "pytest-pingguo-pytest-plugin" is free and open source software
Issues
------
If you encounter any problems, please `file an issue`_ along with a detailed description.
.. _`Cookiecutter`: https://github.com/audreyr/cookiecutter
.. _`@hackebrot`: https://github.com/hackebrot
.. _`MIT`: http://opensource.org/licenses/MIT
.. _`BSD-3`: http://opensource.org/licenses/BSD-3-Clause
.. _`GNU GPL v3.0`: http://www.gnu.org/licenses/gpl-3.0.txt
.. _`Apache Software License 2.0`: http://www.apache.org/licenses/LICENSE-2.0
.. _`cookiecutter-pytest-plugin`: https://github.com/pytest-dev/cookiecutter-pytest-plugin
.. _`file an issue`: https://github.com/wessonlan/pytest-pingguo-pytest-plugin/issues
.. _`pytest`: https://github.com/pytest-dev/pytest
.. _`tox`: https://tox.readthedocs.io/en/latest/
.. _`pip`: https://pypi.org/project/pip/
.. _`PyPI`: https://pypi.org/project
|
PypiClean
|
/hawks-0.1.0.tar.gz/hawks-0.1.0/README.md
|
# HAWKS Data Generator

HAWKS is a tool for generating controllably difficult synthetic data, used primarily for clustering. This repo is associated with the following paper:
1. Shand, C, Allmendinger, R, Handl, J, Webb, A & Keane, J 2019, Evolving Controllably Difficult Datasets for Clustering. in Proceedings of the Annual Conference on Genetic and Evolutionary Computation (GECCO '19) . The Genetic and Evolutionary Computation Conference, Prague, Czech Republic, 13/07/19. [https://doi.org/10.1145/3321707.3321761](https://doi.org/10.1145/3321707.3321761) **(Nominated for best paper on the evolutionary machine learning track at GECCO'19)**
The academic/technical details can be found there. What follows here is a practical guide to using this tool to generate synthetic data.
If you use this tool to generate data that forms part of a paper, please consider either linking to this work or citing the paper above.
## Installation
Installation is available through pip by:
```
pip install hawks
```
or by cloning this repo (and installing locally using `pip install .`).
## Running HAWKS
Like any other package, you need to `import hawks` in order to use it. The parameters of hawks are configured via a config file system. Details of the parameters are found in the [user guide](https://github.com/sea-shunned/hawks/blob/master/user_guide.md). For any parameters that are not specified, default values will be used (as defined in `hawks/defaults.json`).
The example below illustrates how to run `hawks`. Either a dictionary or a path to a JSON config can be provided to override any of the default values.
```python
from pathlib import Path
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
import hawks
# Fix the seed number
config = {
"hawks": {
"seed_num": 42
}
}
# Any missing parameters will take the default seen in configs/defaults.json
generator = hawks.create_generator(config)
# Run the generator
generator.run()
# Get the best dataset found and it's labels
data, labels = generator.get_best_dataset()
# # Plot the best dataset to see how it looks
# generator.plot_best_indiv()
# Run KMeans on the data
km = KMeans(
n_clusters=len(np.unique(labels)), random_state=42
).fit(data)
# Get the Adjusted Rand Index for KMeans on the data
ari = adjusted_rand_score(labels, km.labels_)
print(f"ARI: {ari}")
```
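A path to a JSON config file with the same structure can be passed instead of a dictionary (assuming the file mirrors the config keys shown above):

```python
from pathlib import Path
import hawks

generator = hawks.create_generator(Path("my_hawks_config.json"))
generator.run()
```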
## User Guide
For a more detailed explanation of the parameters and how to use HAWKS, please read the [user guide](https://github.com/sea-shunned/hawks/blob/master/user_guide.md).
## Issues
As this work is still in development, plain sailing is not guaranteed. If you encounter an issue, first ensure that `hawks` is running as intended by navigating to the tests directory, and running `python tests.py`. If any test fails, please add details of this alongside your original problem to an issue on the [GitHub repo](https://github.com/sea-shunned/hawks/issues).
## Feature Requests
At present, this is primarily academic work, so future developments will be released here after they have been published. If you have any suggestions or simple feature requests for HAWKS as a tool to use, please raise that on the [GitHub repo](https://github.com/sea-shunned/hawks/issues).
If you are interested in extending this work or collaborating, please email cameron(dot)shand(at)manchester(dot)ac(dot)uk.
|
PypiClean
|
/termage-0.6.0.tar.gz/termage-0.6.0/site/assets/javascripts/lunr/min/lunr.es.min.js
|
/*!
 * based on
* Snowball JavaScript Library v0.3
* http://code.google.com/p/urim/
* http://snowball.tartarus.org/
*
* Copyright 2010, Oleg Mazko
* http://www.mozilla.org/MPL/
*/
!function(e,s){"function"==typeof define&&define.amd?define(s):"object"==typeof exports?module.exports=s():s()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.es=function(){this.pipeline.reset(),this.pipeline.add(e.es.trimmer,e.es.stopWordFilter,e.es.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.es.stemmer))},e.es.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.es.trimmer=e.trimmerSupport.generateTrimmer(e.es.wordCharacters),e.Pipeline.registerFunction(e.es.trimmer,"trimmer-es"),e.es.stemmer=function(){var s=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,n=new function(){function e(){if(A.out_grouping(x,97,252)){for(;!A.in_grouping(x,97,252);){if(A.cursor>=A.limit)return!0;A.cursor++}return!1}return!0}function n(){if(A.in_grouping(x,97,252)){var s=A.cursor;if(e()){if(A.cursor=s,!A.in_grouping(x,97,252))return!0;for(;!A.out_grouping(x,97,252);){if(A.cursor>=A.limit)return!0;A.cursor++}}return!1}return!0}function i(){var s,r=A.cursor;if(n()){if(A.cursor=r,!A.out_grouping(x,97,252))return;if(s=A.cursor,e()){if(A.cursor=s,!A.in_grouping(x,97,252)||A.cursor>=A.limit)return;A.cursor++}}g=A.cursor}function a(){for(;!A.in_grouping(x,97,252);){if(A.cursor>=A.limit)return!1;A.cursor++}for(;!A.out_grouping(x,97,252);){if(A.cursor>=A.limit)return!1;A.cursor++}return!0}function t(){var e=A.cursor;g=A.limit,p=g,v=g,i(),A.cursor=e,a()&&(p=A.cursor,a()&&(v=A.cursor))}function o(){for(var e;;){if(A.bra=A.cursor,e=A.find_among(k,6))switch(A.ket=A.cursor,e){case 1:A.slice_from("a");continue;case 2:A.slice_from("e");continue;case 3:A.slice_from("i");continue;case 4:A.slice_from("o");continue;case 5:A.slice_from("u");continue;case 6:if(A.cursor>=A.limit)break;A.cursor++;continue}break}}function u(){return g<=A.cursor}function w(){return p<=A.cursor}function c(){return v<=A.cursor}function m(){var e;if(A.ket=A.cursor,A.find_among_b(y,13)&&(A.bra=A.cursor,(e=A.find_among_b(q,11))&&u()))switch(e){case 1:A.bra=A.cursor,A.slice_from("iendo");break;case 2:A.bra=A.cursor,A.slice_from("ando");break;case 3:A.bra=A.cursor,A.slice_from("ar");break;case 4:A.bra=A.cursor,A.slice_from("er");break;case 5:A.bra=A.cursor,A.slice_from("ir");break;case 6:A.slice_del();break;case 7:A.eq_s_b(1,"u")&&A.slice_del()}}function l(e,s){if(!c())return!0;A.slice_del(),A.ket=A.cursor;var r=A.find_among_b(e,s);return r&&(A.bra=A.cursor,1==r&&c()&&A.slice_del()),!1}function d(e){return!c()||(A.slice_del(),A.ket=A.cursor,A.eq_s_b(2,e)&&(A.bra=A.cursor,c()&&A.slice_del()),!1)}function b(){var e;if(A.ket=A.cursor,e=A.find_among_b(S,46)){switch(A.bra=A.cursor,e){case 1:if(!c())return!1;A.slice_del();break;case 2:if(d("ic"))return!1;break;case 3:if(!c())return!1;A.slice_from("log");break;case 4:if(!c())return!1;A.slice_from("u");break;case 5:if(!c())return!1;A.slice_from("ente");break;case 6:if(!w())return!1;A.slice_del(),A.ket=A.cursor,e=A.find_among_b(C,4),e&&(A.bra=A.cursor,c()&&(A.slice_del(),1==e&&(A.ket=A.cursor,A.eq_s_b(2,"at")&&(A.bra=A.cursor,c()&&A.slice_del()))));break;case 7:if(l(P,3))return!1;break;case 8:if(l(F,3))return!1;break;case 9:if(d("at"))return!1}return!0}return!1}function f(){var 
e,s;if(A.cursor>=g&&(s=A.limit_backward,A.limit_backward=g,A.ket=A.cursor,e=A.find_among_b(W,12),A.limit_backward=s,e)){if(A.bra=A.cursor,1==e){if(!A.eq_s_b(1,"u"))return!1;A.slice_del()}return!0}return!1}function _(){var e,s,r,n;if(A.cursor>=g&&(s=A.limit_backward,A.limit_backward=g,A.ket=A.cursor,e=A.find_among_b(L,96),A.limit_backward=s,e))switch(A.bra=A.cursor,e){case 1:r=A.limit-A.cursor,A.eq_s_b(1,"u")?(n=A.limit-A.cursor,A.eq_s_b(1,"g")?A.cursor=A.limit-n:A.cursor=A.limit-r):A.cursor=A.limit-r,A.bra=A.cursor;case 2:A.slice_del()}}function h(){var e,s;if(A.ket=A.cursor,e=A.find_among_b(z,8))switch(A.bra=A.cursor,e){case 1:u()&&A.slice_del();break;case 2:u()&&(A.slice_del(),A.ket=A.cursor,A.eq_s_b(1,"u")&&(A.bra=A.cursor,s=A.limit-A.cursor,A.eq_s_b(1,"g")&&(A.cursor=A.limit-s,u()&&A.slice_del())))}}var v,p,g,k=[new s("",-1,6),new s("á",0,1),new s("é",0,2),new s("í",0,3),new s("ó",0,4),new s("ú",0,5)],y=[new s("la",-1,-1),new s("sela",0,-1),new s("le",-1,-1),new s("me",-1,-1),new s("se",-1,-1),new s("lo",-1,-1),new s("selo",5,-1),new s("las",-1,-1),new s("selas",7,-1),new s("les",-1,-1),new s("los",-1,-1),new s("selos",10,-1),new s("nos",-1,-1)],q=[new s("ando",-1,6),new s("iendo",-1,6),new s("yendo",-1,7),new s("ándo",-1,2),new s("iéndo",-1,1),new s("ar",-1,6),new s("er",-1,6),new s("ir",-1,6),new s("ár",-1,3),new s("ér",-1,4),new s("ír",-1,5)],C=[new s("ic",-1,-1),new s("ad",-1,-1),new s("os",-1,-1),new s("iv",-1,1)],P=[new s("able",-1,1),new s("ible",-1,1),new s("ante",-1,1)],F=[new s("ic",-1,1),new s("abil",-1,1),new s("iv",-1,1)],S=[new s("ica",-1,1),new s("ancia",-1,2),new s("encia",-1,5),new s("adora",-1,2),new s("osa",-1,1),new s("ista",-1,1),new s("iva",-1,9),new s("anza",-1,1),new s("logía",-1,3),new s("idad",-1,8),new s("able",-1,1),new s("ible",-1,1),new s("ante",-1,2),new s("mente",-1,7),new s("amente",13,6),new s("ación",-1,2),new s("ución",-1,4),new s("ico",-1,1),new s("ismo",-1,1),new s("oso",-1,1),new s("amiento",-1,1),new s("imiento",-1,1),new s("ivo",-1,9),new s("ador",-1,2),new s("icas",-1,1),new s("ancias",-1,2),new s("encias",-1,5),new s("adoras",-1,2),new s("osas",-1,1),new s("istas",-1,1),new s("ivas",-1,9),new s("anzas",-1,1),new s("logías",-1,3),new s("idades",-1,8),new s("ables",-1,1),new s("ibles",-1,1),new s("aciones",-1,2),new s("uciones",-1,4),new s("adores",-1,2),new s("antes",-1,2),new s("icos",-1,1),new s("ismos",-1,1),new s("osos",-1,1),new s("amientos",-1,1),new s("imientos",-1,1),new s("ivos",-1,9)],W=[new s("ya",-1,1),new s("ye",-1,1),new s("yan",-1,1),new s("yen",-1,1),new s("yeron",-1,1),new s("yendo",-1,1),new s("yo",-1,1),new s("yas",-1,1),new s("yes",-1,1),new s("yais",-1,1),new s("yamos",-1,1),new s("yó",-1,1)],L=[new s("aba",-1,2),new s("ada",-1,2),new s("ida",-1,2),new s("ara",-1,2),new s("iera",-1,2),new s("ía",-1,2),new s("aría",5,2),new s("ería",5,2),new s("iría",5,2),new s("ad",-1,2),new s("ed",-1,2),new s("id",-1,2),new s("ase",-1,2),new s("iese",-1,2),new s("aste",-1,2),new s("iste",-1,2),new s("an",-1,2),new s("aban",16,2),new s("aran",16,2),new s("ieran",16,2),new s("ían",16,2),new s("arían",20,2),new s("erían",20,2),new s("irían",20,2),new s("en",-1,1),new s("asen",24,2),new s("iesen",24,2),new s("aron",-1,2),new s("ieron",-1,2),new s("arán",-1,2),new s("erán",-1,2),new s("irán",-1,2),new s("ado",-1,2),new s("ido",-1,2),new s("ando",-1,2),new s("iendo",-1,2),new s("ar",-1,2),new s("er",-1,2),new s("ir",-1,2),new s("as",-1,2),new s("abas",39,2),new s("adas",39,2),new s("idas",39,2),new s("aras",39,2),new s("ieras",39,2),new 
s("ías",39,2),new s("arías",45,2),new s("erías",45,2),new s("irías",45,2),new s("es",-1,1),new s("ases",49,2),new s("ieses",49,2),new s("abais",-1,2),new s("arais",-1,2),new s("ierais",-1,2),new s("íais",-1,2),new s("aríais",55,2),new s("eríais",55,2),new s("iríais",55,2),new s("aseis",-1,2),new s("ieseis",-1,2),new s("asteis",-1,2),new s("isteis",-1,2),new s("áis",-1,2),new s("éis",-1,1),new s("aréis",64,2),new s("eréis",64,2),new s("iréis",64,2),new s("ados",-1,2),new s("idos",-1,2),new s("amos",-1,2),new s("ábamos",70,2),new s("áramos",70,2),new s("iéramos",70,2),new s("íamos",70,2),new s("aríamos",74,2),new s("eríamos",74,2),new s("iríamos",74,2),new s("emos",-1,1),new s("aremos",78,2),new s("eremos",78,2),new s("iremos",78,2),new s("ásemos",78,2),new s("iésemos",78,2),new s("imos",-1,2),new s("arás",-1,2),new s("erás",-1,2),new s("irás",-1,2),new s("ís",-1,2),new s("ará",-1,2),new s("erá",-1,2),new s("irá",-1,2),new s("aré",-1,2),new s("eré",-1,2),new s("iré",-1,2),new s("ió",-1,2)],z=[new s("a",-1,1),new s("e",-1,2),new s("o",-1,1),new s("os",-1,1),new s("á",-1,1),new s("é",-1,2),new s("í",-1,1),new s("ó",-1,1)],x=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,1,17,4,10],A=new r;this.setCurrent=function(e){A.setCurrent(e)},this.getCurrent=function(){return A.getCurrent()},this.stem=function(){var e=A.cursor;return t(),A.limit_backward=e,A.cursor=A.limit,m(),A.cursor=A.limit,b()||(A.cursor=A.limit,f()||(A.cursor=A.limit,_())),A.cursor=A.limit,h(),A.cursor=A.limit_backward,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.es.stemmer,"stemmer-es"),e.es.stopWordFilter=e.generateStopWordFilter("a al algo algunas algunos ante antes como con contra cual cuando de del desde donde durante e el ella ellas ellos en entre era erais eran eras eres es esa esas ese eso esos esta estaba estabais estaban estabas estad estada estadas estado estados estamos estando estar estaremos estará estarán estarás estaré estaréis estaría estaríais estaríamos estarían estarías estas este estemos esto estos estoy estuve estuviera estuvierais estuvieran estuvieras estuvieron estuviese estuvieseis estuviesen estuvieses estuvimos estuviste estuvisteis estuviéramos estuviésemos estuvo está estábamos estáis están estás esté estéis estén estés fue fuera fuerais fueran fueras fueron fuese fueseis fuesen fueses fui fuimos fuiste fuisteis fuéramos fuésemos ha habida habidas habido habidos habiendo habremos habrá habrán habrás habré habréis habría habríais habríamos habrían habrías habéis había habíais habíamos habían habías han has hasta hay haya hayamos hayan hayas hayáis he hemos hube hubiera hubierais hubieran hubieras hubieron hubiese hubieseis hubiesen hubieses hubimos hubiste hubisteis hubiéramos hubiésemos hubo la las le les lo los me mi mis mucho muchos muy más mí mía mías mío míos nada ni no nos nosotras nosotros nuestra nuestras nuestro nuestros o os otra otras otro otros para pero poco por porque que quien quienes qué se sea seamos sean seas seremos será serán serás seré seréis sería seríais seríamos serían serías seáis sido siendo sin sobre sois somos son soy su sus suya suyas suyo suyos sí también tanto te tendremos tendrá tendrán tendrás tendré tendréis tendría tendríais tendríamos tendrían tendrías tened tenemos tenga tengamos tengan tengas tengo tengáis tenida tenidas tenido tenidos teniendo tenéis tenía teníais teníamos tenían tenías ti tiene tienen tienes todo 
todos tu tus tuve tuviera tuvierais tuvieran tuvieras tuvieron tuviese tuvieseis tuviesen tuvieses tuvimos tuviste tuvisteis tuviéramos tuviésemos tuvo tuya tuyas tuyo tuyos tú un una uno unos vosotras vosotros vuestra vuestras vuestro vuestros y ya yo él éramos".split(" ")),e.Pipeline.registerFunction(e.es.stopWordFilter,"stopWordFilter-es")}});
|
PypiClean
|
/yt-dlp-custom-0.0.1.tar.gz/yt-dlp-custom-0.0.1/yt_dlp/extractor/gronkh.py
|
import functools
from .common import InfoExtractor
from ..utils import (
OnDemandPagedList,
float_or_none,
traverse_obj,
unified_strdate,
)
class GronkhIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gronkh\.tv/(?:watch/)?streams?/(?P<id>\d+)'
_TESTS = [{
'url': 'https://gronkh.tv/streams/657',
'info_dict': {
'id': '657',
'ext': 'mp4',
'title': 'H.O.R.D.E. - DAS ZWEiTE ZEiTALTER 🎲 Session 1',
'view_count': int,
'thumbnail': 'https://01.cdn.vod.farm/preview/9e2555d3a23bf4e5c5b7c6b3b70a9d84.jpg',
'upload_date': '20221111',
'chapters': 'count:3',
'duration': 31463,
},
'params': {'skip_download': True}
}, {
'url': 'https://gronkh.tv/stream/536',
'info_dict': {
'id': '536',
'ext': 'mp4',
'title': 'GTV0536, 2021-10-01 - MARTHA IS DEAD #FREiAB1830 !FF7 !horde !archiv',
'view_count': int,
'thumbnail': 'https://01.cdn.vod.farm/preview/6436746cce14e25f751260a692872b9b.jpg',
'upload_date': '20211001',
'duration': 32058,
},
'params': {'skip_download': True}
}, {
'url': 'https://gronkh.tv/watch/stream/546',
'only_matching': True,
}]
def _real_extract(self, url):
id = self._match_id(url)
data_json = self._download_json(f'https://api.gronkh.tv/v1/video/info?episode={id}', id)
m3u8_url = self._download_json(f'https://api.gronkh.tv/v1/video/playlist?episode={id}', id)['playlist_url']
formats, subtitles = self._extract_m3u8_formats_and_subtitles(m3u8_url, id)
if data_json.get('vtt_url'):
subtitles.setdefault('en', []).append({
'url': data_json['vtt_url'],
'ext': 'vtt',
})
return {
'id': id,
'title': data_json.get('title'),
'view_count': data_json.get('views'),
'thumbnail': data_json.get('preview_url'),
'upload_date': unified_strdate(data_json.get('created_at')),
'formats': formats,
'subtitles': subtitles,
'duration': float_or_none(data_json.get('source_length')),
'chapters': traverse_obj(data_json, (
'chapters', lambda _, v: float_or_none(v['offset']) is not None, {
'title': 'title',
'start_time': ('offset', {float_or_none}),
})) or None,
}
class GronkhFeedIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gronkh\.tv(?:/feed)?/?(?:#|$)'
IE_NAME = 'gronkh:feed'
_TESTS = [{
'url': 'https://gronkh.tv/feed',
'info_dict': {
'id': 'feed',
},
'playlist_count': 16,
}, {
'url': 'https://gronkh.tv',
'only_matching': True,
}]
def _entries(self):
for type_ in ('recent', 'views'):
info = self._download_json(
f'https://api.gronkh.tv/v1/video/discovery/{type_}', 'feed', note=f'Downloading {type_} API JSON')
for item in traverse_obj(info, ('discovery', ...)) or []:
yield self.url_result(f'https://gronkh.tv/watch/stream/{item["episode"]}', GronkhIE, item.get('title'))
def _real_extract(self, url):
return self.playlist_result(self._entries(), 'feed')
class GronkhVodsIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gronkh\.tv/vods/streams/?(?:#|$)'
IE_NAME = 'gronkh:vods'
_TESTS = [{
'url': 'https://gronkh.tv/vods/streams',
'info_dict': {
'id': 'vods',
},
'playlist_mincount': 150,
}]
_PER_PAGE = 25
def _fetch_page(self, page):
items = traverse_obj(self._download_json(
'https://api.gronkh.tv/v1/search', 'vods', query={'offset': self._PER_PAGE * page, 'first': self._PER_PAGE},
note=f'Downloading stream video page {page + 1}'), ('results', 'videos', ...))
for item in items or []:
yield self.url_result(f'https://gronkh.tv/watch/stream/{item["episode"]}', GronkhIE, item['episode'], item.get('title'))
def _real_extract(self, url):
entries = OnDemandPagedList(functools.partial(self._fetch_page), self._PER_PAGE)
return self.playlist_result(entries, 'vods')
|
PypiClean
|
/manic_xai-1.0.83-py3-none-any.whl/manic/Utility.py
|
import decimal
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
class Utility:
"""
Utility class for formatting counterfactuals, checking their validity, printing results, and computing the disagreement matrix.
@param data_instance: The instance to be explained.
@type data_instance: list
@param categories: A dictionary containing the possible categories for each categorical feature.
@type categories: dict
@param immutable_features: A set of indices of features that are immutable and cannot be changed in counterfactuals.
@type immutable_features: set
@param target_class: The target class that the counterfactuals aim to achieve.
@type target_class: int
@param verbose: An integer representing the verbosity level for printing messages during the counterfactual generation process.
@type verbose: int
@param predict_fn: The function used for predicting the class label of instances.
@type predict_fn: callable
@param disagreement: The disagreement object used for calculating disagreement scores.
@type disagreement: Disagreement
@param base_counterfactuals: The base counterfactuals used in the counterfactual generation process.
@type base_counterfactuals: list of list
@param labels: Labels for the base counterfactuals, used for result printing.
@type labels: list
"""
def __init__(self, data_instance, categories, immutable_features, target_class, verbose, predict_fn, disagreement, base_counterfactuals, labels):
self.data_instance = data_instance
self.categories = categories
self.immutable_features = immutable_features
self.target_class = target_class
self.verbose = verbose
self.predict_fn = predict_fn
self.disagreement = disagreement
self.base_counterfactuals = base_counterfactuals
self.labels = labels
def format_counterfactual(self, counterfactual):
"""
Format the counterfactual by rounding numerical values and converting decimal values to integers.
@param counterfactual: The counterfactual to be formatted.
@type counterfactual: list
@return: The formatted counterfactual.
@rtype: list
"""
formatted_counterfactual = []
for i in range(len(counterfactual)):
if i in self.categories:
formatted_counterfactual.append(round(counterfactual[i]))
else:
decimal_feature = decimal.Decimal(str(self.data_instance[i]))
decimal_places = decimal_feature.as_tuple().exponent * -1
if decimal_places == 0:
formatted_counterfactual.append(int(counterfactual[i]))
else:
formatted_counterfactual.append(round(counterfactual[i], decimal_places))
return formatted_counterfactual
def is_counterfactual_valid(self, counterfactual):
"""
Check the validity of a counterfactual.
@param counterfactual: The counterfactual to be validated.
@type counterfactual: list
@return: True if the counterfactual is valid, False otherwise.
@rtype: bool
"""
# Check if the counterfactual is not None
if counterfactual is None:
if self.verbose > 0:
print("Invalid Counterfactual: None value generated as counterfactual.")
return False
# Check if any immutable features are changed
for i in self.immutable_features:
if counterfactual[i] != self.data_instance[i]:
if self.verbose > 0:
print(f"Invalid Counterfactual: Feature at index {i} is immutable and cannot be changed.")
return False
# Check if the class is equal to the target class
prediction = self.predict_fn(counterfactual)
if prediction != self.target_class:
if self.verbose > 0:
print(f"Invalid Counterfactual: Predicted class ({prediction}) is not the target class ({self.target_class}).")
return False
# All conditions are met, counterfactual is valid
if self.verbose > 0:
print("Valid Counterfactual: No immutable features were changed and the counterfactual causes the correct prediction change.")
return True
def print_results(self, best_counterfactual, best_fitness, num_generations, generation_found, time_taken, time_found, cpu_cycles, proximity_score, sparsity_score, number_of_changes, disagreement_score):
"""
Print the results of the counterfactual generation process.
@param best_counterfactual: The best counterfactual found during the generation process.
@type best_counterfactual: list
@param best_fitness: The fitness score of the best counterfactual.
@type best_fitness: float
@param num_generations: The total number of generations that were evaluated during the process.
@type num_generations: int
@param generation_found: The generation at which the best counterfactual was found.
@type generation_found: int
@param time_taken: The total time taken to search for the counterfactual.
@type time_taken: float
@param time_found: The time taken to find the best counterfactual.
@type time_found: float
@param cpu_cycles: The total CPU cycles run during the search process.
@type cpu_cycles: float
@param proximity_score: The proximity score between the meta-counterfactual and the instance explained.
@type proximity_score: float
@param sparsity_score: The sparsity score of the meta-counterfactual.
@type sparsity_score: float
@param number_of_changes: The number of feature differences between the meta-counterfactual and the instance explained.
@type number_of_changes: int
@param disagreement_score: The average disagreement score between the meta-counterfactual and the base counterfactuals.
@type disagreement_score: float
"""
print("\n------ Counterfactual Generation Results ------")
if best_counterfactual is not None:
print(f"{np.array2string(np.array(best_counterfactual), separator=', ')}: Best Counterfactual 👑")
print(f"{np.array2string(np.array(self.data_instance), separator=', ')}: Instance Explained 🔍")
for i, counterfactual in enumerate(self.base_counterfactuals):
print(f"{np.array2string(counterfactual, separator=', ')}: {self.labels[i]}")
print("Proximity from Data Instance:", proximity_score)
print("Sparsity:", sparsity_score)
print("Number of changes made to produce the counterfactual:", number_of_changes)
print("Disagreement Score against Base Counterfactuals:", disagreement_score)
print("Number of Generations:", num_generations)
print(f"Counterfactual found after {generation_found + 1} generations")
print("Fitness Score:", best_fitness)
print(f"Time taken to find counterfactual: {time_found:.4f} seconds")
print(f"Total time searched: {time_taken:.4f} seconds")
print(f"Total CPU cycles ran: {cpu_cycles:.4f}")
else:
print("No valid counterfactual found within the specified number of generations.")
print("Try increasing the number of generations or population size and/or altering alpha, beta and/or perturbation_fraction. As a last resort, you can also try changing the seed.")
print("------ End of Results ------\n")
def __str__(self):
"""
Return a string representation of the Utility object.
@return: String representation of the Utility object.
@rtype: str
"""
predict_fn_name = self.predict_fn.__name__ if self.predict_fn else self.predict_fn
return f"Utility Object:\n" \
f"Data Instance: {self.data_instance}\n" \
f"Categories: {self.categories}\n" \
f"Immutable Features: {self.immutable_features}\n" \
f"Target Class: {self.target_class}\n" \
f"Verbose: {self.verbose}\n" \
f"Predict Function: {predict_fn_name}\n" \
f"Disagreement Object: {self.disagreement}\n" \
f"Base Counterfactuals: {self.base_counterfactuals}\n" \
f"Labels: {self.labels}\n"
def to_string(self):
"""
Return a string representation of the Utility object.
@return: String representation of the Utility object.
@rtype: str
"""
return str(self)
def compute_disagreement_matrix(self, counterfactuals, agreement):
"""
Compute the disagreement matrix between counterfactuals.
@param counterfactuals: The list of counterfactuals for which the disagreement matrix is computed.
@type counterfactuals: list of list
@param agreement: Boolean flag to compute the agreement matrix instead of disagreement matrix.
@type agreement: bool
@return: The disagreement matrix or agreement matrix.
@rtype: numpy.ndarray
"""
n = len(counterfactuals)
disagreement_matrix = np.zeros((n, n), dtype=float)
for i in range(n):
for j in range(n):
disagreement_score = self.disagreement.calculate_disagreement(counterfactuals[i], counterfactuals[j])
if agreement:
disagreement_score = 1 - disagreement_score
disagreement_matrix[i, j] = disagreement_score
disagreement_matrix[j, i] = disagreement_score
return disagreement_matrix
def plot_agreement_heatmap(self, agreement=True, dataset_name=None):
"""
Plot a heatmap of the agreement or disagreement matrix.
@param agreement: Boolean flag to plot the agreement heatmap (True) or disagreement heatmap (False).
@type agreement: bool
@param dataset_name: Optional dataset name appended to the plot title.
@type dataset_name: str
"""
disagreement_matrix = self.compute_disagreement_matrix(self.base_counterfactuals, agreement)
plt.figure(figsize=(len(self.labels), len(self.labels)))
sns.heatmap(disagreement_matrix, annot=True, cmap=sns.cubehelix_palette(as_cmap=True), xticklabels=self.labels, yticklabels=self.labels)
plt.xlabel('Counterfactual')
plt.ylabel('Counterfactual')
title = "Pairwise Agreement"
if dataset_name is not None:
title += f" for the {dataset_name} Dataset"
plt.title(title)
plt.show()
|
PypiClean
|
/pulumi_artifactory-4.6.0a1693459609.tar.gz/pulumi_artifactory-4.6.0a1693459609/pulumi_artifactory/proxy.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ProxyArgs', 'Proxy']
@pulumi.input_type
class ProxyArgs:
def __init__(__self__, *,
host: pulumi.Input[str],
key: pulumi.Input[str],
port: pulumi.Input[int],
nt_domain: Optional[pulumi.Input[str]] = None,
nt_host: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
platform_default: Optional[pulumi.Input[bool]] = None,
redirect_to_hosts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Proxy resource.
:param pulumi.Input[str] host: The name of the proxy host.
:param pulumi.Input[str] key: The unique ID of the proxy.
:param pulumi.Input[int] port: The proxy port number.
:param pulumi.Input[str] nt_domain: The proxy domain/realm name.
:param pulumi.Input[str] nt_host: The computer name of the machine (the machine connecting to the NTLM proxy).
:param pulumi.Input[str] password: The proxy password when authentication credentials are required.
:param pulumi.Input[bool] platform_default: When set, this proxy will be the default proxy for new remote repositories and for internal HTTP requests issued by Artifactory. Will also be used as proxy for all other services in the platform (for example: Xray, Distribution, etc).
:param pulumi.Input[Sequence[pulumi.Input[str]]] redirect_to_hosts: An optional list of host names to which this proxy may redirect requests. The credentials defined for the proxy are reused by requests redirected to all of these hosts.
:param pulumi.Input[Sequence[pulumi.Input[str]]] services: An optional list of service names for which this proxy will be the default. The options are `jfrt`, `jfmc`, `jfxr`, `jfds`.
:param pulumi.Input[str] username: The proxy username when authentication credentials are required.
"""
pulumi.set(__self__, "host", host)
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "port", port)
if nt_domain is not None:
pulumi.set(__self__, "nt_domain", nt_domain)
if nt_host is not None:
pulumi.set(__self__, "nt_host", nt_host)
if password is not None:
pulumi.set(__self__, "password", password)
if platform_default is not None:
pulumi.set(__self__, "platform_default", platform_default)
if redirect_to_hosts is not None:
pulumi.set(__self__, "redirect_to_hosts", redirect_to_hosts)
if services is not None:
pulumi.set(__self__, "services", services)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def host(self) -> pulumi.Input[str]:
"""
The name of the proxy host.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: pulumi.Input[str]):
pulumi.set(self, "host", value)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The unique ID of the proxy.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def port(self) -> pulumi.Input[int]:
"""
The proxy port number.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input[int]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="ntDomain")
def nt_domain(self) -> Optional[pulumi.Input[str]]:
"""
The proxy domain/realm name.
"""
return pulumi.get(self, "nt_domain")
@nt_domain.setter
def nt_domain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "nt_domain", value)
@property
@pulumi.getter(name="ntHost")
def nt_host(self) -> Optional[pulumi.Input[str]]:
"""
The computer name of the machine (the machine connecting to the NTLM proxy).
"""
return pulumi.get(self, "nt_host")
@nt_host.setter
def nt_host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "nt_host", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
The proxy password when authentication credentials are required.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="platformDefault")
def platform_default(self) -> Optional[pulumi.Input[bool]]:
"""
When set, this proxy will be the default proxy for new remote repositories and for internal HTTP requests issued by Artifactory. Will also be used as proxy for all other services in the platform (for example: Xray, Distribution, etc).
"""
return pulumi.get(self, "platform_default")
@platform_default.setter
def platform_default(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "platform_default", value)
@property
@pulumi.getter(name="redirectToHosts")
def redirect_to_hosts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An optional list of host names to which this proxy may redirect requests. The credentials defined for the proxy are reused by requests redirected to all of these hosts.
"""
return pulumi.get(self, "redirect_to_hosts")
@redirect_to_hosts.setter
def redirect_to_hosts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "redirect_to_hosts", value)
@property
@pulumi.getter
def services(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An optional list of service names for which this proxy will be the default. The options are `jfrt`, `jfmc`, `jfxr`, `jfds`.
"""
return pulumi.get(self, "services")
@services.setter
def services(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "services", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
The proxy username when authentication credentials are required.
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
@pulumi.input_type
class _ProxyState:
def __init__(__self__, *,
host: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
nt_domain: Optional[pulumi.Input[str]] = None,
nt_host: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
platform_default: Optional[pulumi.Input[bool]] = None,
port: Optional[pulumi.Input[int]] = None,
redirect_to_hosts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Proxy resources.
:param pulumi.Input[str] host: The name of the proxy host.
:param pulumi.Input[str] key: The unique ID of the proxy.
:param pulumi.Input[str] nt_domain: The proxy domain/realm name.
:param pulumi.Input[str] nt_host: The computer name of the machine (the machine connecting to the NTLM proxy).
:param pulumi.Input[str] password: The proxy password when authentication credentials are required.
:param pulumi.Input[bool] platform_default: When set, this proxy will be the default proxy for new remote repositories and for internal HTTP requests issued by Artifactory. Will also be used as proxy for all other services in the platform (for example: Xray, Distribution, etc).
:param pulumi.Input[int] port: The proxy port number.
:param pulumi.Input[Sequence[pulumi.Input[str]]] redirect_to_hosts: An optional list of host names to which this proxy may redirect requests. The credentials defined for the proxy are reused by requests redirected to all of these hosts.
:param pulumi.Input[Sequence[pulumi.Input[str]]] services: An optional list of service names for which this proxy will be the default. The options are `jfrt`, `jfmc`, `jfxr`, `jfds`.
:param pulumi.Input[str] username: The proxy username when authentication credentials are required.
"""
if host is not None:
pulumi.set(__self__, "host", host)
if key is not None:
pulumi.set(__self__, "key", key)
if nt_domain is not None:
pulumi.set(__self__, "nt_domain", nt_domain)
if nt_host is not None:
pulumi.set(__self__, "nt_host", nt_host)
if password is not None:
pulumi.set(__self__, "password", password)
if platform_default is not None:
pulumi.set(__self__, "platform_default", platform_default)
if port is not None:
pulumi.set(__self__, "port", port)
if redirect_to_hosts is not None:
pulumi.set(__self__, "redirect_to_hosts", redirect_to_hosts)
if services is not None:
pulumi.set(__self__, "services", services)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
The name of the proxy host.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
The unique ID of the proxy.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter(name="ntDomain")
def nt_domain(self) -> Optional[pulumi.Input[str]]:
"""
The proxy domain/realm name.
"""
return pulumi.get(self, "nt_domain")
@nt_domain.setter
def nt_domain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "nt_domain", value)
@property
@pulumi.getter(name="ntHost")
def nt_host(self) -> Optional[pulumi.Input[str]]:
"""
The computer name of the machine (the machine connecting to the NTLM proxy).
"""
return pulumi.get(self, "nt_host")
@nt_host.setter
def nt_host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "nt_host", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
The proxy password when authentication credentials are required.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="platformDefault")
def platform_default(self) -> Optional[pulumi.Input[bool]]:
"""
When set, this proxy will be the default proxy for new remote repositories and for internal HTTP requests issued by Artifactory. Will also be used as proxy for all other services in the platform (for example: Xray, Distribution, etc).
"""
return pulumi.get(self, "platform_default")
@platform_default.setter
def platform_default(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "platform_default", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
"""
The proxy port number.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="redirectToHosts")
def redirect_to_hosts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An optional list of host names to which this proxy may redirect requests. The credentials defined for the proxy are reused by requests redirected to all of these hosts.
"""
return pulumi.get(self, "redirect_to_hosts")
@redirect_to_hosts.setter
def redirect_to_hosts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "redirect_to_hosts", value)
@property
@pulumi.getter
def services(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An optional list of service names for which this proxy will be the default. The options are `jfrt`, `jfmc`, `jfxr`, `jfds`.
"""
return pulumi.get(self, "services")
@services.setter
def services(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "services", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
The proxy username when authentication credentials are required.
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
class Proxy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
host: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
nt_domain: Optional[pulumi.Input[str]] = None,
nt_host: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
platform_default: Optional[pulumi.Input[bool]] = None,
port: Optional[pulumi.Input[int]] = None,
redirect_to_hosts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
username: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides an Artifactory Proxy resource.
This resource configuration corresponds to 'proxies' config block in system configuration XML
(REST endpoint: [artifactory/api/system/configuration](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-GeneralConfiguration)).
~>The `Proxy` resource utilizes endpoints which are blocked/removed in SaaS environments (i.e. in Artifactory online), rendering this resource incompatible with Artifactory SaaS environments.
## Example Usage
```python
import pulumi
import pulumi_artifactory as artifactory
my_proxy = artifactory.Proxy("my-proxy",
host="my-proxy.mycompany.com",
key="my-proxy",
nt_domain="MYCOMPANY",
nt_host="MYCOMPANY.COM",
password="password",
platform_default=False,
port=8888,
redirect_to_hosts=["redirec-host.mycompany.com"],
services=[
"jfrt",
"jfxr",
],
username="user1")
```
## Import
Current Proxy can be imported using `proxy-key` from Artifactory as the `ID`, e.g.
```sh
$ pulumi import artifactory:index/proxy:Proxy my-proxy proxy-key
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] host: The name of the proxy host.
:param pulumi.Input[str] key: The unique ID of the proxy.
:param pulumi.Input[str] nt_domain: The proxy domain/realm name.
:param pulumi.Input[str] nt_host: The computer name of the machine (the machine connecting to the NTLM proxy).
:param pulumi.Input[str] password: The proxy password when authentication credentials are required.
:param pulumi.Input[bool] platform_default: When set, this proxy will be the default proxy for new remote repositories and for internal HTTP requests issued by Artifactory. Will also be used as proxy for all other services in the platform (for example: Xray, Distribution, etc).
:param pulumi.Input[int] port: The proxy port number.
:param pulumi.Input[Sequence[pulumi.Input[str]]] redirect_to_hosts: An optional list of host names to which this proxy may redirect requests. The credentials defined for the proxy are reused by requests redirected to all of these hosts.
:param pulumi.Input[Sequence[pulumi.Input[str]]] services: An optional list of service names for which this proxy will be the default. The options are `jfrt`, `jfmc`, `jfxr`, `jfds`.
:param pulumi.Input[str] username: The proxy username when authentication credentials are required.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ProxyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an Artifactory Proxy resource.
This resource configuration corresponds to 'proxies' config block in system configuration XML
(REST endpoint: [artifactory/api/system/configuration](https://www.jfrog.com/confluence/display/JFROG/Artifactory+REST+API#ArtifactoryRESTAPI-GeneralConfiguration)).
~>The `Proxy` resource utilizes endpoints which are blocked/removed in SaaS environments (i.e. in Artifactory online), rendering this resource incompatible with Artifactory SaaS environments.
## Example Usage
```python
import pulumi
import pulumi_artifactory as artifactory
my_proxy = artifactory.Proxy("my-proxy",
host="my-proxy.mycompany.com",
key="my-proxy",
nt_domain="MYCOMPANY",
nt_host="MYCOMPANY.COM",
password="password",
platform_default=False,
port=8888,
redirect_to_hosts=["redirec-host.mycompany.com"],
services=[
"jfrt",
"jfxr",
],
username="user1")
```
## Import
Current Proxy can be imported using `proxy-key` from Artifactory as the `ID`, e.g.
```sh
$ pulumi import artifactory:index/proxy:Proxy my-proxy proxy-key
```
:param str resource_name: The name of the resource.
:param ProxyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProxyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
host: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
nt_domain: Optional[pulumi.Input[str]] = None,
nt_host: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
platform_default: Optional[pulumi.Input[bool]] = None,
port: Optional[pulumi.Input[int]] = None,
redirect_to_hosts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
username: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProxyArgs.__new__(ProxyArgs)
if host is None and not opts.urn:
raise TypeError("Missing required property 'host'")
__props__.__dict__["host"] = host
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__["key"] = key
__props__.__dict__["nt_domain"] = nt_domain
__props__.__dict__["nt_host"] = nt_host
__props__.__dict__["password"] = None if password is None else pulumi.Output.secret(password)
__props__.__dict__["platform_default"] = platform_default
if port is None and not opts.urn:
raise TypeError("Missing required property 'port'")
__props__.__dict__["port"] = port
__props__.__dict__["redirect_to_hosts"] = redirect_to_hosts
__props__.__dict__["services"] = services
__props__.__dict__["username"] = username
secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["password"])
opts = pulumi.ResourceOptions.merge(opts, secret_opts)
super(Proxy, __self__).__init__(
'artifactory:index/proxy:Proxy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
host: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
nt_domain: Optional[pulumi.Input[str]] = None,
nt_host: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
platform_default: Optional[pulumi.Input[bool]] = None,
port: Optional[pulumi.Input[int]] = None,
redirect_to_hosts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
username: Optional[pulumi.Input[str]] = None) -> 'Proxy':
"""
Get an existing Proxy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] host: The name of the proxy host.
:param pulumi.Input[str] key: The unique ID of the proxy.
:param pulumi.Input[str] nt_domain: The proxy domain/realm name.
:param pulumi.Input[str] nt_host: The computer name of the machine (the machine connecting to the NTLM proxy).
:param pulumi.Input[str] password: The proxy password when authentication credentials are required.
:param pulumi.Input[bool] platform_default: When set, this proxy will be the default proxy for new remote repositories and for internal HTTP requests issued by Artifactory. Will also be used as proxy for all other services in the platform (for example: Xray, Distribution, etc).
:param pulumi.Input[int] port: The proxy port number.
:param pulumi.Input[Sequence[pulumi.Input[str]]] redirect_to_hosts: An optional list of host names to which this proxy may redirect requests. The credentials defined for the proxy are reused by requests redirected to all of these hosts.
:param pulumi.Input[Sequence[pulumi.Input[str]]] services: An optional list of service names for which this proxy will be the default. The options are `jfrt`, `jfmc`, `jfxr`, `jfds`.
:param pulumi.Input[str] username: The proxy username when authentication credentials are required.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ProxyState.__new__(_ProxyState)
__props__.__dict__["host"] = host
__props__.__dict__["key"] = key
__props__.__dict__["nt_domain"] = nt_domain
__props__.__dict__["nt_host"] = nt_host
__props__.__dict__["password"] = password
__props__.__dict__["platform_default"] = platform_default
__props__.__dict__["port"] = port
__props__.__dict__["redirect_to_hosts"] = redirect_to_hosts
__props__.__dict__["services"] = services
__props__.__dict__["username"] = username
return Proxy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def host(self) -> pulumi.Output[str]:
"""
The name of the proxy host.
"""
return pulumi.get(self, "host")
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
The unique ID of the proxy.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter(name="ntDomain")
def nt_domain(self) -> pulumi.Output[Optional[str]]:
"""
The proxy domain/realm name.
"""
return pulumi.get(self, "nt_domain")
@property
@pulumi.getter(name="ntHost")
def nt_host(self) -> pulumi.Output[Optional[str]]:
"""
The computer name of the machine (the machine connecting to the NTLM proxy).
"""
return pulumi.get(self, "nt_host")
@property
@pulumi.getter
def password(self) -> pulumi.Output[Optional[str]]:
"""
The proxy password when authentication credentials are required.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter(name="platformDefault")
def platform_default(self) -> pulumi.Output[Optional[bool]]:
"""
When set, this proxy will be the default proxy for new remote repositories and for internal HTTP requests issued by Artifactory. Will also be used as proxy for all other services in the platform (for example: Xray, Distribution, etc).
"""
return pulumi.get(self, "platform_default")
@property
@pulumi.getter
def port(self) -> pulumi.Output[int]:
"""
The proxy port number.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter(name="redirectToHosts")
def redirect_to_hosts(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
An optional list of host names to which this proxy may redirect requests. The credentials defined for the proxy are reused by requests redirected to all of these hosts.
"""
return pulumi.get(self, "redirect_to_hosts")
@property
@pulumi.getter
def services(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
An optional list of service names for which this proxy will be the default. The options are `jfrt`, `jfmc`, `jfxr`, `jfds`.
"""
return pulumi.get(self, "services")
@property
@pulumi.getter
def username(self) -> pulumi.Output[Optional[str]]:
"""
The proxy username when authentication credentials are required.
"""
return pulumi.get(self, "username")
|
PypiClean
|
/ipodshuffle-0.4.1-py3-none-any.whl/teresa/show.py
|
import os
import pprint
from ipodshuffle.db import Shuffle as ShuffleDB
L1 = '=' * 80
L2 = '-' * 50
def show(args):
base = args.base
_ctrl = 'iPod_Control'
_itunessd_chunk = open(base + '/' + _ctrl + '/iTunes/iTunesSD', 'rb').read()
_itunesstats_chunk = None
_itunesstats_path = base + '/' + _ctrl + '/iTunes/iTunesStats'
if os.path.exists(_itunesstats_path):
_itunesstats_chunk = open(_itunesstats_path, 'rb').read()
shuffledb = ShuffleDB(_itunessd_chunk, _itunesstats_chunk)
print(L1)
print('enable_voiceover: ', shuffledb.enable_voiceover)
print('max_volume: ', shuffledb.max_volume)
print('number of playlists: ', len(shuffledb.playlists))
print(L1)
print()
print('Tracks:')
print(L1)
for track in shuffledb.tracks:
print('INDEX: ', shuffledb.tracks.index(track))
itunes_sd, itunes_stats = track.get_dics()
print('ITUNES_SD:')
pprint.pprint(itunes_sd)
print('ITUNES_STATS:')
pprint.pprint(itunes_stats)
if track != shuffledb.tracks[-1]:
print(L2)
print(L1)
print()
print('Playlists:')
print(L1)
for pl in shuffledb.playlists:
print('type: ', pl.type)
print('index of tracks: ', pl.indexes_of_tracks)
print('dbid: ', pl.dbid)
if pl != shuffledb.playlists[-1]:
print(L2)
print(L1)
def register(parser):
import argparse
from .utils import add_optional_group, add_args_help, add_args_ipod_base
from . import translate as _
parser_show = parser.add_parser('show', help=_('show ipod low level DB'),
formatter_class=argparse.RawTextHelpFormatter,
epilog=_('Example of use:') + '\n' +
' %(prog)s -b /media/ipod_base',
add_help=False
)
optional_group = add_optional_group(parser_show)
add_args_help(optional_group)
add_args_ipod_base(optional_group)
optional_group.set_defaults(func=show)
|
PypiClean
|
/xerparser-0.10.3.tar.gz/xerparser-0.10.3/README.md
|
# xerparser
Read the contents of a P6 .xer file and convert it into a Python object.
*Disclaimers:
It's helpful if you are already familiar with the mapping and schemas used by P6 during the export process.
Refer to the [Oracle Documentation]( https://docs.oracle.com/cd/F25600_01/English/Mapping_and_Schema/xer_import_export_data_map_project/index.htm) for more information regarding how data is mapped to the XER format.
Tested on .xer files exported as versions 15.2 through 19.12.*
<br/>
## Install
**Windows**:
```bash
pip install xerparser
```
**Linux/Mac**:
```bash
pip3 install xerparser
```
<br/>
## Usage
Import the `Xer` class from `xerparser` and pass the contents of a .xer file as an argument. Use the `Xer` class variable `CODEC` to set the proper encoding to decode the file.
```python
from xerparser import Xer
file = r"/path/to/file.xer"
with open(file, encoding=Xer.CODEC, errors="ignore") as f:
file_contents = f.read()
xer = Xer(file_contents)
```
Do not pass the .xer file directly as an argument to the `Xer` class. The file must be decoded and read into a string, which can then be passed as an argument. Or, pass the .xer file into the `Xer.reader` classmethod, which accepts:
* str or pathlib.Path objects for files stored locally or on a server.
* Binary files from requests, Flask, FastAPI, etc...
```python
from xerparser import Xer
file = r"/path/to/file.xer"
xer = Xer.reader(file)
```
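The same classmethod also covers the binary-file case listed above. A minimal sketch, assuming any readable binary file object is acceptable (an uploaded file from requests, Flask, or FastAPI would be passed the same way):
```python
from xerparser import Xer

file = r"/path/to/file.xer"

# A plain binary handle stands in here for an uploaded file object.
with open(file, "rb") as f:
    xer = Xer.reader(f)
```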
<br/>
## Attributes
The tables stored in the .xer file are accessible as either Global, Project specific, Task specific, or Resource specific:
### Global
```python
xer.export_info # export data
xer.activity_code_types # dict of ACTVTYPE objects
xer.activity_code_values # dict of ACTVCODE objects
xer.calendars # dict of all CALENDAR objects
xer.financial_periods # dict of FINDATES objects
xer.notebook_topics # dict of MEMOTYPE objects
xer.projects # dict of PROJECT objects
xer.project_code_types # dict of PCATTYPE objects
xer.project_code_values # dict of PCATVAL objects
xer.tasks # dict of all TASK objects
xer.relationships # dict of all TASKPRED objects
xer.resources # dict of RSRC objects
xer.udf_types # dict of UDFTYPE objects
xer.wbs_nodes # dict of all PROJWBS objects
```
### Project Specific
```python
# Get first project
project = list(xer.projects.values())[0]
project.activity_codes # list of project specific ACTVTYPE objects
project.calendars # list of project specific CALENDAR objects
project.project_codes # dict of PCATTYPE: PCATVAL objects
project.tasks # list of project specific TASK objects
project.relationships # list of project specific TASKPRED objects
project.user_defined_fields # dict of `UDFTYPE`: `UDF Value` pairs
project.wbs_nodes # list of project specific PROJWBS objects
```
### Task Specific
```python
# Get first task
task = project.tasks[0]
task.activity_codes # dict of ACTVTYPE: ACTVCODE objects
task.memos # list of TASKMEMO objects
task.periods # list of TASKFIN objects
task.resources # dict of TASKRSRC objects
task.user_defined_fields # dict of `UDFTYPE`: `UDF Value` pairs
```
### Resource Specific
```python
# Get first task resource
resource = list(task.resources.values())[0]
resource.periods # list of TRSRCFIN objects
resource.user_defined_fields # dict of `UDFTYPE`: `UDF Value` pairs
```
<br/>
## Error Checking
Sometimes the xer file is corrupted during the export process. If this is the case, a `CorruptXerFile` Exception will be raised during initialization. A list of the errors can be accessed from the `CorruptXerFile` Exception, or by using the `find_xer_errors` function.
### Option 1 - `errors` attribute of `CorruptXerFile` exception (preferred)
```python
from xerparser import Xer, CorruptXerFile
file = r"/path/to/file.xer"
try:
xer = Xer.reader(file)
except CorruptXerFile as e:
for error in e.errors:
print(error)
```
### Option 2 - `find_xer_errors` function
```python
from xerparser import parser, file_reader, find_xer_errors
file = r"/path/to/file.xer"
xer_data = parser(file_reader(file))
file_errors = find_xer_errors(xer_data)
for error in file_errors:
print(error)
```
### Errors
- Minimum required tables - an error is recorded if one of the following tables is missing:
- CALENDAR
- PROJECT
- PROJWBS
- TASK
- TASKPRED
- Required table pairs - an error is recorded if Table 1 is included but not Table 2:
| Table 1 | Table 2 | Notes |
| :----------- |:-------------|----------|
| TASKFIN | FINDATES | *Financial Period Data for Task* |
| TRSRCFIN | FINDATES | *Financial Period Data for Task Resource* |
| TASKRSRC | RSRC | *Resource Data* |
| TASKMEMO | MEMOTYPE | *Notebook Data* |
| ACTVCODE | ACTVTYPE | *Activity Code Data* |
| TASKACTV | ACTVCODE | *Activity Code Data* |
| PCATVAL | PCATTYPE | *Project Code Data* |
| PROJPCAT | PCATVAL | *Project Code Data* |
| UDFVALUE | UDFTYPE | *User Defined Field Data* |
- Non-existent calendars assigned to tasks.
- Non-existent resources assigned to task resources.
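For reference, the table checks listed above roughly amount to membership tests over the parsed table names. The sketch below is illustrative only, not xerparser's actual `find_xer_errors` implementation; it assumes `xer_data` is the dict of tables returned by `parser(file_reader(file))` and that table names are its keys:
```python
REQUIRED_TABLES = ("CALENDAR", "PROJECT", "PROJWBS", "TASK", "TASKPRED")

# Table 1 -> Table 2 pairs from the table above.
REQUIRED_TABLE_PAIRS = {
    "TASKFIN": "FINDATES",
    "TRSRCFIN": "FINDATES",
    "TASKRSRC": "RSRC",
    "TASKMEMO": "MEMOTYPE",
    "ACTVCODE": "ACTVTYPE",
    "TASKACTV": "ACTVCODE",
    "PCATVAL": "PCATTYPE",
    "PROJPCAT": "PCATVAL",
    "UDFVALUE": "UDFTYPE",
}

def sketch_table_errors(xer_data: dict) -> list:
    """Collect missing-table errors; a simplified stand-in for find_xer_errors."""
    errors = []
    for table in REQUIRED_TABLES:
        if table not in xer_data:
            errors.append(f"Missing required table: {table}")
    for table, partner in REQUIRED_TABLE_PAIRS.items():
        if table in xer_data and partner not in xer_data:
            errors.append(f"{table} table included without {partner} table")
    return errors
```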
|
PypiClean
|
/tencentcloud-sdk-python-intl-en-3.0.786.tar.gz/tencentcloud-sdk-python-intl-en-3.0.786/tencentcloud/trtc/v20190722/errorcodes.py
|
# CAM signature/authentication error.
AUTHFAILURE = 'AuthFailure'
# Identity verification has not been completed, so this operation is not allowed.
AUTHFAILURE_UNREALNAMEAUTHENTICATED = 'AuthFailure.UnRealNameAuthenticated'
# CAM authentication failed.
AUTHFAILURE_UNAUTHORIZEDOPERATION = 'AuthFailure.UnauthorizedOperation'
# Unsupported operation.
AUTHFAILURE_UNSUPPORTEDOPERATION = 'AuthFailure.UnsupportedOperation'
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Unsupported on-cloud recording method.
FAILEDOPERATION_CRUNSUPPORTMETHOD = 'FailedOperation.CRUnsupportMethod'
# Maximum number of concurrent on-cloud recording tasks reached. Contact us to raise the limit.
FAILEDOPERATION_RESTRICTEDCONCURRENCY = 'FailedOperation.RestrictedConcurrency'
# The room does not exist.
FAILEDOPERATION_ROOMNOTEXIST = 'FailedOperation.RoomNotExist'
# The application ID does not exist.
FAILEDOPERATION_SDKAPPIDNOTEXIST = 'FailedOperation.SdkAppIdNotExist'
# The user is not in the room.
FAILEDOPERATION_USERNOTEXIST = 'FailedOperation.UserNotExist'
# Internal error.
INTERNALERROR = 'InternalError'
# On-cloud recording internal error.
INTERNALERROR_CRINTERNALERROR = 'InternalError.CRInternalError'
# An error occurred while querying the database.
INTERNALERROR_DBERROR = 'InternalError.DBError'
# An error occurred during an ES query.
INTERNALERROR_ESQUERYERROR = 'InternalError.EsQueryError'
# Failed to query the room.
INTERNALERROR_GETROOMCACHEIPERROR = 'InternalError.GetRoomCacheIpError'
# Failed to get room information.
INTERNALERROR_GETROOMFROMCACHEERROR = 'InternalError.GetRoomFromCacheError'
# Failed to parse the HTTP request.
INTERNALERROR_HTTPPARASEFALIED = 'InternalError.HttpParaseFalied'
# API error.
INTERNALERROR_INTERFACEERR = 'InternalError.InterfaceErr'
# Unsupported method.
INTERNALERROR_METHODERR = 'InternalError.MethodErr'
# The user is not in the room.
INTERNALERROR_USERNOTEXIST = 'InternalError.UserNotExist'
# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'
# Failed to parse body parameters.
INVALIDPARAMETER_BODYPARAMSERROR = 'InvalidParameter.BodyParamsError'
# Invalid `EncodeParams`.
INVALIDPARAMETER_ENCODEPARAMS = 'InvalidParameter.EncodeParams'
# Invalid `EndTs`.
INVALIDPARAMETER_ENDTS = 'InvalidParameter.EndTs'
# Parameter value is out of range.
INVALIDPARAMETER_OUTOFRANGE = 'InvalidParameter.OutOfRange'
# Invalid `PageNumber`.
INVALIDPARAMETER_PAGENUMBER = 'InvalidParameter.PageNumber'
# Invalid `PageSize`.
INVALIDPARAMETER_PAGESIZE = 'InvalidParameter.PageSize'
# The value of `PageSize` exceeds 100.
INVALIDPARAMETER_PAGESIZEOVERSIZE = 'InvalidParameter.PageSizeOversize'
# The query period exceeds the limit.
INVALIDPARAMETER_QUERYSCALEOVERSIZE = 'InvalidParameter.QueryScaleOversize'
# `RoomId` is incorrect.
INVALIDPARAMETER_ROOMID = 'InvalidParameter.RoomId'
# `SdkAppId` is incorrect.
INVALIDPARAMETER_SDKAPPID = 'InvalidParameter.SdkAppId'
# The start time for query exceeded the limit.
INVALIDPARAMETER_STARTTIMEEXPIRE = 'InvalidParameter.StartTimeExpire'
# The query start time exceeds the range allowed by the current dashboard edition. For details, see https://intl.cloud.tencent.com/document/product/647/81331?from_cn_redirect=1
INVALIDPARAMETER_STARTTIMEOVERSIZE = 'InvalidParameter.StartTimeOversize'
# Invalid `StartTs`.
INVALIDPARAMETER_STARTTS = 'InvalidParameter.StartTs'
# The start time for query exceeded the limit.
INVALIDPARAMETER_STARTTSOVERSIZE = 'InvalidParameter.StartTsOversize'
# Failed to parse URL parameters.
INVALIDPARAMETER_URLPARAMSERROR = 'InvalidParameter.UrlParamsError'
# Invalid `UserId`.
INVALIDPARAMETER_USERID = 'InvalidParameter.UserId'
# `UserIds` is incorrect.
INVALIDPARAMETER_USERIDS = 'InvalidParameter.UserIds'
# The number of users exceeds 6.
INVALIDPARAMETER_USERIDSMORETHANSIX = 'InvalidParameter.UserIdsMorethanSix'
# Invalid RoomId.
INVALIDPARAMETERVALUE_ROOMID = 'InvalidParameterValue.RoomId'
# Missing parameter.
MISSINGPARAMETER = 'MissingParameter'
# `AccessKey` parameter missing.
MISSINGPARAMETER_ACCESSKEY = 'MissingParameter.AccessKey'
# `AppId` missing.
MISSINGPARAMETER_APPID = 'MissingParameter.AppId'
# `Bucket` parameter missing.
MISSINGPARAMETER_BUCKET = 'MissingParameter.Bucket'
# `CloudStorage` parameter missing.
MISSINGPARAMETER_CLOUDSTORAGE = 'MissingParameter.CloudStorage'
# `CommId` is missing.
MISSINGPARAMETER_COMMID = 'MissingParameter.CommId'
# `SdkAppId` or `CommID` missing.
MISSINGPARAMETER_COMMIDORSDKAPPID = 'MissingParameter.CommIdOrSdkAppId'
# `endTS_s` is missing.
MISSINGPARAMETER_ENDTS = 'MissingParameter.EndTs'
# `RecordMode` parameter missing.
MISSINGPARAMETER_RECORDMODE = 'MissingParameter.RecordMode'
# `RecordParams` parameter missing.
MISSINGPARAMETER_RECORDPARAMS = 'MissingParameter.RecordParams'
# `Region` parameter missing.
MISSINGPARAMETER_REGION = 'MissingParameter.Region'
# `RoomId` is missing.
MISSINGPARAMETER_ROOMID = 'MissingParameter.RoomId'
# `RoomNum` is missing.
MISSINGPARAMETER_ROOMNUM = 'MissingParameter.RoomNum'
# `SdkAppId` is missing.
MISSINGPARAMETER_SDKAPPID = 'MissingParameter.SdkAppId'
# `SecretKey` parameter missing.
MISSINGPARAMETER_SECRETKEY = 'MissingParameter.SecretKey'
# `startTS_s` is missing.
MISSINGPARAMETER_STARTTS = 'MissingParameter.StartTs'
# `StorageParams` parameter missing.
MISSINGPARAMETER_STORAGEPARAMS = 'MissingParameter.StorageParams'
# `StreamType` parameter missing.
MISSINGPARAMETER_STREAMTYPE = 'MissingParameter.StreamType'
# `TaskId` parameter missing.
MISSINGPARAMETER_TASKID = 'MissingParameter.TaskId'
# Missing `UserId` parameter.
MISSINGPARAMETER_USERID = 'MissingParameter.UserId'
# `UserIds` is missing.
MISSINGPARAMETER_USERIDS = 'MissingParameter.UserIds'
# `UserSig` parameter missing.
MISSINGPARAMETER_USERSIG = 'MissingParameter.UserSig'
# `Vendor` parameter missing.
MISSINGPARAMETER_VENDOR = 'MissingParameter.Vendor'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# No permission to manipulate `SdkAppId`.
UNAUTHORIZEDOPERATION_SDKAPPID = 'UnauthorizedOperation.SdkAppId'
# Unsupported operation.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
|
PypiClean
|
/testeCryptoguardian-0.0.1.tar.gz/testeCryptoguardian-0.0.1/README.md
|
# CryptoGuardian
CryptoGuardian is a Python package that provides a simple and powerful file encryption and decryption tool. It lets you protect your sensitive files with strong encryption using the Fernet cryptography library, while providing a graphical interface built with PyQt5.
## Features
- **File encryption**: easily encrypt your files to protect sensitive data.
- **File decryption**: decrypt previously encrypted files when you need to access their contents.
- **Password management**: automatically generate strong encryption keys and copy them to the clipboard for safekeeping.
- **Intuitive graphical interface**: the GUI makes it easy for users of every skill level to encrypt and decrypt files.
### Installation
You can install CryptoGuardian via `pip`:
```bash
pip install cryptoguardian
```
### Usage
```python
import cryptoguardian

# Create a CryptoGuardian instance
guardian = cryptoguardian.CryptoGuardian()

# Encrypt a file
guardian.encrypt_file()

# Decrypt a file
guardian.decrypt_file()
```
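The README points at Fernet from the `cryptography` package as the underlying encryption. The snippet below is not part of CryptoGuardian's API; it is only a minimal, standalone illustration of that primitive:
```python
from cryptography.fernet import Fernet

# Generate a strong random key, then encrypt and decrypt some bytes.
key = Fernet.generate_key()
fernet = Fernet(key)

token = fernet.encrypt(b"sensitive file contents")
original = fernet.decrypt(token)
assert original == b"sensitive file contents"
```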
### License
CryptoGuardian is distributed under the [MIT License](https://opensource.org/licenses/MIT). You are free to use, modify, and distribute this package under the terms of the **[license](https://opensource.org/licenses/MIT).**
|
PypiClean
|
/w-0.11.1.tar.gz/w-0.11.1/wallace/nodes.py
|
from wallace.models import Node, Info
from wallace.information import State
from sqlalchemy import Integer
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.sql.expression import cast
from operator import attrgetter
import random
class Agent(Node):
"""A Node with fitness."""
__mapper_args__ = {"polymorphic_identity": "agent"}
@hybrid_property
def fitness(self):
"""Endow agents with a numerical fitness."""
try:
return float(self.property1)
except TypeError:
return None
@fitness.setter
def fitness(self, fitness):
"""Assign fitness to property1."""
self.property1 = repr(fitness)
@fitness.expression
def fitness(self):
"""Retrieve fitness via property1."""
return cast(self.property1, Integer)
class ReplicatorAgent(Agent):
"""An agent that copies incoming transmissions."""
__mapper_args__ = {"polymorphic_identity": "replicator_agent"}
def update(self, infos):
"""Replicate the incoming information."""
for info_in in infos:
self.replicate(info_in=info_in)
class Source(Node):
"""An AI Node that only sends transmissions.
By default, when asked to transmit, a Source creates and sends
a new Info. Sources cannot receive transmissions.
"""
__mapper_args__ = {"polymorphic_identity": "generic_source"}
def _what(self):
"""What to transmit by default."""
return self.create_information()
def create_information(self):
"""Create new infos on demand."""
info = self._info_type()(
origin=self,
contents=self._contents())
return info
def _info_type(self):
"""The type of info to be created."""
return Info
def _contents(self):
"""The contents of new infos."""
raise NotImplementedError(
"{}.contents() needs to be defined.".format(type(self)))
def receive(self, what):
"""Raise an error if asked to receive a transmission."""
raise Exception("Sources cannot receive transmissions.")
class RandomBinaryStringSource(Source):
"""A source that transmits random binary strings."""
__mapper_args__ = {"polymorphic_identity": "random_binary_string_source"}
def _contents(self):
"""Generate a random binary string."""
return "".join([str(random.randint(0, 1)) for i in range(2)])
class Environment(Node):
"""A node with a state."""
__mapper_args__ = {"polymorphic_identity": "environment"}
def state(self, time=None):
"""The most recently-created info of type State at the specfied time.
If time is None then it returns the most recent state as of now.
"""
if time is None:
return max(self.infos(type=State), key=attrgetter('creation_time'))
else:
states = [
s for s in self.infos(type=State) if s.creation_time < time]
return max(states, key=attrgetter('creation_time'))
def _what(self):
"""Return the most recent state."""
return self.state()
|
PypiClean
|
/pulumi_azure_nextgen-0.6.2a1613157620.tar.gz/pulumi_azure_nextgen-0.6.2a1613157620/pulumi_azure_nextgen/securityinsights/v20200101/incident.py
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Incident']
class Incident(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
classification: Optional[pulumi.Input[Union[str, 'IncidentClassification']]] = None,
classification_comment: Optional[pulumi.Input[str]] = None,
classification_reason: Optional[pulumi.Input[Union[str, 'IncidentClassificationReason']]] = None,
description: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
first_activity_time_utc: Optional[pulumi.Input[str]] = None,
incident_id: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IncidentLabelArgs']]]]] = None,
last_activity_time_utc: Optional[pulumi.Input[str]] = None,
owner: Optional[pulumi.Input[pulumi.InputType['IncidentOwnerInfoArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
severity: Optional[pulumi.Input[Union[str, 'IncidentSeverity']]] = None,
status: Optional[pulumi.Input[Union[str, 'IncidentStatus']]] = None,
title: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Represents an incident in Azure Security Insights.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'IncidentClassification']] classification: The reason the incident was closed
:param pulumi.Input[str] classification_comment: Describes the reason the incident was closed
:param pulumi.Input[Union[str, 'IncidentClassificationReason']] classification_reason: The classification reason the incident was closed with
:param pulumi.Input[str] description: The description of the incident
:param pulumi.Input[str] etag: Etag of the azure resource
:param pulumi.Input[str] first_activity_time_utc: The time of the first activity in the incident
:param pulumi.Input[str] incident_id: Incident ID
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IncidentLabelArgs']]]] labels: List of labels relevant to this incident
:param pulumi.Input[str] last_activity_time_utc: The time of the last activity in the incident
:param pulumi.Input[pulumi.InputType['IncidentOwnerInfoArgs']] owner: Describes a user that the incident is assigned to
:param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
:param pulumi.Input[Union[str, 'IncidentSeverity']] severity: The severity of the incident
:param pulumi.Input[Union[str, 'IncidentStatus']] status: The status of the incident
:param pulumi.Input[str] title: The title of the incident
:param pulumi.Input[str] workspace_name: The name of the workspace.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['classification'] = classification
__props__['classification_comment'] = classification_comment
__props__['classification_reason'] = classification_reason
__props__['description'] = description
__props__['etag'] = etag
__props__['first_activity_time_utc'] = first_activity_time_utc
if incident_id is None and not opts.urn:
raise TypeError("Missing required property 'incident_id'")
__props__['incident_id'] = incident_id
__props__['labels'] = labels
__props__['last_activity_time_utc'] = last_activity_time_utc
__props__['owner'] = owner
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if severity is None and not opts.urn:
raise TypeError("Missing required property 'severity'")
__props__['severity'] = severity
if status is None and not opts.urn:
raise TypeError("Missing required property 'status'")
__props__['status'] = status
if title is None and not opts.urn:
raise TypeError("Missing required property 'title'")
__props__['title'] = title
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__['workspace_name'] = workspace_name
__props__['additional_data'] = None
__props__['created_time_utc'] = None
__props__['incident_number'] = None
__props__['incident_url'] = None
__props__['last_modified_time_utc'] = None
__props__['name'] = None
__props__['related_analytic_rule_ids'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:securityinsights:Incident"), pulumi.Alias(type_="azure-nextgen:securityinsights/latest:Incident")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Incident, __self__).__init__(
'azure-nextgen:securityinsights/v20200101:Incident',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Incident':
"""
Get an existing Incident resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Incident(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="additionalData")
def additional_data(self) -> pulumi.Output['outputs.IncidentAdditionalDataResponse']:
"""
Additional data on the incident
"""
return pulumi.get(self, "additional_data")
@property
@pulumi.getter
def classification(self) -> pulumi.Output[Optional[str]]:
"""
The reason the incident was closed
"""
return pulumi.get(self, "classification")
@property
@pulumi.getter(name="classificationComment")
def classification_comment(self) -> pulumi.Output[Optional[str]]:
"""
Describes the reason the incident was closed
"""
return pulumi.get(self, "classification_comment")
@property
@pulumi.getter(name="classificationReason")
def classification_reason(self) -> pulumi.Output[Optional[str]]:
"""
The classification reason the incident was closed with
"""
return pulumi.get(self, "classification_reason")
@property
@pulumi.getter(name="createdTimeUtc")
def created_time_utc(self) -> pulumi.Output[str]:
"""
The time the incident was created
"""
return pulumi.get(self, "created_time_utc")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description of the incident
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="firstActivityTimeUtc")
def first_activity_time_utc(self) -> pulumi.Output[Optional[str]]:
"""
The time of the first activity in the incident
"""
return pulumi.get(self, "first_activity_time_utc")
@property
@pulumi.getter(name="incidentNumber")
def incident_number(self) -> pulumi.Output[int]:
"""
A sequential number
"""
return pulumi.get(self, "incident_number")
@property
@pulumi.getter(name="incidentUrl")
def incident_url(self) -> pulumi.Output[str]:
"""
The deep-link url to the incident in Azure portal
"""
return pulumi.get(self, "incident_url")
@property
@pulumi.getter
def labels(self) -> pulumi.Output[Optional[Sequence['outputs.IncidentLabelResponse']]]:
"""
List of labels relevant to this incident
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter(name="lastActivityTimeUtc")
def last_activity_time_utc(self) -> pulumi.Output[Optional[str]]:
"""
The time of the last activity in the incident
"""
return pulumi.get(self, "last_activity_time_utc")
@property
@pulumi.getter(name="lastModifiedTimeUtc")
def last_modified_time_utc(self) -> pulumi.Output[str]:
"""
The last time the incident was updated
"""
return pulumi.get(self, "last_modified_time_utc")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def owner(self) -> pulumi.Output[Optional['outputs.IncidentOwnerInfoResponse']]:
"""
Describes a user that the incident is assigned to
"""
return pulumi.get(self, "owner")
@property
@pulumi.getter(name="relatedAnalyticRuleIds")
def related_analytic_rule_ids(self) -> pulumi.Output[Sequence[str]]:
"""
List of resource ids of Analytic rules related to the incident
"""
return pulumi.get(self, "related_analytic_rule_ids")
@property
@pulumi.getter
def severity(self) -> pulumi.Output[str]:
"""
The severity of the incident
"""
return pulumi.get(self, "severity")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The status of the incident
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def title(self) -> pulumi.Output[str]:
"""
The title of the incident
"""
return pulumi.get(self, "title")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Azure resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
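# ---------------------------------------------------------------------------
# Usage sketch (not part of this module): a hypothetical Pulumi program that
# declares an Incident with the properties __init__ above treats as required.
# The import path, resource group, workspace and GUID values are placeholders.
#
#   import pulumi_azure_nextgen.securityinsights.v20200101 as securityinsights
#
#   incident = securityinsights.Incident(
#       "example-incident",
#       incident_id="00000000-0000-0000-0000-000000000000",
#       resource_group_name="my-resource-group",
#       workspace_name="my-sentinel-workspace",
#       severity="Medium",
#       status="New",
#       title="Example incident")
# ---------------------------------------------------------------------------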
|
PypiClean
|
/pascalvoc_to_image-1.0.0.tar.gz/pascalvoc_to_image-1.0.0/pascalvoc_to_image/__init__.py
|
import argparse
import os
import xml.etree.ElementTree as ET
import time
from tqdm import tqdm
from PIL import Image
# Main entry function to start the program
def main():
parser = argparse.ArgumentParser()
parser.add_argument('pascalDirectory', metavar='pascalDirectory', type=str, help='A path to the directory with Pascal VOC XML files')
parser.add_argument('imageDirectory', metavar='imageDirectory', type=str, help='A path to the directory with images')
parser.add_argument('saveDirectory', metavar='saveDirectory', type=str, help='A path to the directory to save Pascal boundingbox images to')
args = parser.parse_args()
run(args.pascalDirectory, args.imageDirectory, args.saveDirectory)
# Main function responsible for running necessary code
def run(pascal_dir, image_dir, save_dir):
pascal_files = get_pascal_files(pascal_dir)
parsed_pascal_files = parse_pascal_files(pascal_files, image_dir)
make_dir(save_dir)
create_label_dirs(parsed_pascal_files.get('labels'), save_dir)
pascalvoc_to_images(parsed_pascal_files.get('pascal_data'), save_dir)
# Get all PascalVOC xml files from a specific path
def get_pascal_files(path):
# Array of dicts with file data
files = []
# Loop through all files at a certain path
for file in tqdm(os.listdir(path)):
# Only consider XML
if file.endswith('.xml'):
# Store relevant file data
files.append({ 'base': path, 'filename': file, 'path': os.path.join(path, file)})
return files
# Parse a specific PascalVOC file to a usable dict format
def parse_pascal_file(file, image_dir):
xml_path = file.get('path')
# XML root
xml = ET.parse(xml_path)
# Img name corresponding to XML
img_name = xml.find('filename').text
# Img path corresponding to XML
img_path = xml.find('path').text
# Array of individual objects in a single PascalVOC XML file
objects = []
# A set of labels within a single PascalVOC XML file
labels = set()
# Loop through all labeled objects and add to objects/labels
for i, obj in enumerate(xml.iter('object')):
# Number each individual object to be able to get multiple objects from one file
object_number = i + 1
object_name = '{}_{}'.format(object_number, img_name)
object_label = obj.find('name').text
object_bndbox = obj.find('bndbox')
labels.add(obj.find('name').text)
objects.append({
'path': os.path.join(image_dir, img_name),
'name': object_name,
'xmin': object_bndbox.find('xmin').text,
'xmax': object_bndbox.find('xmax').text,
'ymin': object_bndbox.find('ymin').text,
'ymax': object_bndbox.find('ymax').text,
'label': object_label
})
return { 'objects': objects, 'labels': labels }
# Parse all pascal files
def parse_pascal_files(files, image_dir):
pascal_data = []
labels = set()
# Loop through all PascalVOC XML files and parse them
for file in tqdm(files, ascii=True, desc="Parsing pascal files"):
try:
parses = parse_pascal_file(file, image_dir)
# Merge all object labels
labels = labels.union(parses.get('labels'))
# Merge all pascal data
pascal_data += parses.get('objects')
except Exception as e:
# Just error if a single file can't be read
print('Error reading PascalVOC XML file.')
print('ERROR:' + str(e))
return { 'pascal_data': pascal_data, 'labels': labels }
# Loop through all PascalVOC data and cut an image from each
def pascalvoc_to_images(pascal_data, save_path):
for item in tqdm(pascal_data, ascii=True, desc="Creating images from pascal data"):
pascalvoc_to_image(item, save_path)
# Cut an image from a PascalVOC file data
def pascalvoc_to_image(pascal_data, save_path):
# Create the bndbox to cut from
bndbox = (int(pascal_data.get('xmin')), int(pascal_data.get('ymin')), int(pascal_data.get('xmax')), int(pascal_data.get('ymax')))
# Load the original image
image = Image.open(pascal_data.get('path'))
# Cut a new image from the image using bndbox
image = image.crop(bndbox)
try:
# Save the image to the save_path in the corresponding label folder
image.save(os.path.join(save_path, pascal_data.get('label'), pascal_data.get('name')))
except Exception as e:
# Just error if a single image does not save
print('Error saving cut image')
print('ERROR: ' + str(e))
# Function to create all label directories
def create_label_dirs(labels, save_path):
for label in tqdm(labels, ascii=True, desc="Creating label directories"):
make_dir(save_path, label)
# Helper function to create a directory if it does not already exist
def make_dir(path, name = ''):
path = os.path.abspath(os.path.join(path, name))
if not os.path.exists(path):
try:
os.makedirs(path)
except Exception as e:
# Raise if directory can't be made, because image cuts won't be saved.
print('Error creating directory')
raise e
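# ---------------------------------------------------------------------------
# Usage sketch (not part of the package): the same pipeline can be driven from
# Python by calling run() directly; the directory paths below are placeholders.
#
#   from pascalvoc_to_image import run
#
#   run("annotations/",  # directory containing PascalVOC XML files
#       "images/",       # directory containing the source images
#       "crops/")        # directory to save the cropped object images to
# ---------------------------------------------------------------------------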
|
PypiClean
|
/django-asyncviews-0.1.0.tar.gz/django-asyncviews-0.1.0/README.md
|
Django AsyncViews
=================
Asynchronous JSON/HTML view library
## Quickstart
Install Django AsyncViews:
```sh
pip install django-asyncviews
```
Add it to your `INSTALLED_APPS`:
```python
INSTALLED_APPS = (
...
'asyncviews',
...
)
```
Add AsyncViews' URL patterns:
```python
from asyncviews import urls as asyncviews_urls
urlpatterns = [
...
url(r'^', include(asyncviews_urls)),
...
]
```
## Running tests
Does the code actually work?
```sh
coverage run --source asyncviews runtests.py
```
## Credits
Tools used in rendering this package:
- [Cookiecutter](https://github.com/audreyr/cookiecutter)
- [`cookiecutter-djangopackage`](https://github.com/pydanny/cookiecutter-djangopackage)
|
PypiClean
|
/codex_africanus-0.3.4-py3-none-any.whl/africanus/rime/fast_beam_cubes.py
|
from functools import reduce
import numpy as np
from africanus.util.docs import DocstringTemplate
from africanus.util.numba import njit
@njit(nogil=True, cache=True)
def freq_grid_interp(frequency, beam_freq_map):
# Interpolated grid coordinate
beam_nud = beam_freq_map.shape[0]
freq_data = np.empty((frequency.shape[0], 3),
dtype=frequency.dtype)
for f in range(frequency.shape[0]):
freq = frequency[f]
lower = 0
upper = beam_nud - 1
while lower <= upper:
mid = lower + (upper - lower) // 2
beam_freq = beam_freq_map[mid]
if beam_freq < freq:
lower = mid + 1
elif beam_freq > freq:
upper = mid - 1
else:
lower = mid
break
# This handles the lower <= upper in the while loop
lower = min(lower, upper)
upper = lower + 1
# Set up scaling, lower weight, lower grid pos
if lower == -1:
freq_data[f, 0] = freq / beam_freq_map[0]
freq_data[f, 1] = 1.0
freq_data[f, 2] = 0
elif upper == beam_nud:
freq_data[f, 0] = freq / beam_freq_map[beam_nud - 1]
freq_data[f, 1] = 0.0
freq_data[f, 2] = beam_nud - 2
else:
freq_data[f, 0] = 1.0
freq_low = beam_freq_map[lower]
freq_high = beam_freq_map[upper]
freq_diff = freq_high - freq_low
freq_data[f, 1] = (freq_high - freq) / freq_diff
freq_data[f, 2] = lower
return freq_data
@njit(nogil=True, cache=True)
def beam_cube_dde(beam, beam_lm_extents, beam_freq_map,
lm, parallactic_angles, point_errors, antenna_scaling,
frequency):
nsrc = lm.shape[0]
ntime, nants = parallactic_angles.shape
nchan = frequency.shape[0]
beam_lw, beam_mh, beam_nud = beam.shape[:3]
corrs = beam.shape[3:]
if beam_lw < 2 or beam_mh < 2 or beam_nud < 2:
raise ValueError("beam_lw, beam_mh and beam_nud must be >= 2")
# Flatten correlations
ncorrs = reduce(lambda x, y: x*y, corrs, 1)
lower_l, upper_l = beam_lm_extents[0]
lower_m, upper_m = beam_lm_extents[1]
ex_dtype = beam_lm_extents.dtype
# Maximum l and m indices in float and int
lmaxf = ex_dtype.type(beam_lw - 1)
mmaxf = ex_dtype.type(beam_mh - 1)
lmaxi = beam_lw - 1
mmaxi = beam_mh - 1
lscale = lmaxf / (upper_l - lower_l)
mscale = mmaxf / (upper_m - lower_m)
one = ex_dtype.type(1)
zero = ex_dtype.type(0)
# Flatten the beam on correlation
fbeam = beam.reshape((beam_lw, beam_mh, beam_nud, ncorrs))
# Allocate output array with correlations flattened
fjones = np.empty((nsrc, ntime, nants, nchan, ncorrs), dtype=beam.dtype)
# Compute frequency interpolation scaling, weights and grid positions
freq_data = freq_grid_interp(frequency, beam_freq_map)
corr_sum = np.zeros((ncorrs,), dtype=beam.dtype)
absc_sum = np.zeros((ncorrs,), dtype=beam.real.dtype)
beam_scratch = np.zeros((ncorrs,), dtype=beam.dtype)
for t in range(ntime):
for a in range(nants):
sin_pa = np.sin(parallactic_angles[t, a])
cos_pa = np.cos(parallactic_angles[t, a])
for s in range(nsrc):
# Extract lm coordinates
l, m = lm[s]
for f in range(nchan):
# Unpack frequency data
freq_scale = freq_data[f, 0]
# lower and upper frequency weights
nud = freq_data[f, 1]
inv_nud = 1.0 - nud
# lower and upper frequency grid position
gc0 = np.int32(freq_data[f, 2])
gc1 = gc0 + 1
# Apply any frequency scaling
sl = l * freq_scale
sm = m * freq_scale
# Add pointing errors
tl = sl + point_errors[t, a, f, 0]
tm = sm + point_errors[t, a, f, 1]
# Rotate lm coordinate angle
vl = tl*cos_pa - tm*sin_pa
vm = tl*sin_pa + tm*cos_pa
# Scale by antenna scaling
vl *= antenna_scaling[a, f, 0]
vm *= antenna_scaling[a, f, 1]
# Shift into the cube coordinate system
vl = lscale*(vl - lower_l)
vm = mscale*(vm - lower_m)
# Clamp the coordinates to the edges of the cube
vl = max(zero, min(vl, lmaxf))
vm = max(zero, min(vm, mmaxf))
# Snap to the lower grid coordinates
gl0 = np.int32(np.floor(vl))
gm0 = np.int32(np.floor(vm))
# Snap to the upper grid coordinates
gl1 = min(gl0 + 1, lmaxi)
gm1 = min(gm0 + 1, mmaxi)
# Difference between grid and offset coordinates
ld = vl - gl0
md = vm - gm0
# Zero accumulation arrays
corr_sum[:] = 0
absc_sum[:] = 0
# Accumulate lower cube correlations
beam_scratch[:] = fbeam[gl0, gm0, gc0, :]
weight = (one - ld)*(one - md)*nud
for c in range(ncorrs):
absc_sum[c] += weight * np.abs(beam_scratch[c])
corr_sum[c] += weight * beam_scratch[c]
beam_scratch[:] = fbeam[gl1, gm0, gc0, :]
weight = ld*(one - md)*nud
for c in range(ncorrs):
absc_sum[c] += weight * np.abs(beam_scratch[c])
corr_sum[c] += weight * beam_scratch[c]
beam_scratch[:] = fbeam[gl0, gm1, gc0, :]
weight = (one - ld)*md*nud
for c in range(ncorrs):
absc_sum[c] += weight * np.abs(beam_scratch[c])
corr_sum[c] += weight * beam_scratch[c]
beam_scratch[:] = fbeam[gl1, gm1, gc0, :]
weight = ld*md*nud
for c in range(ncorrs):
absc_sum[c] += weight * np.abs(beam_scratch[c])
corr_sum[c] += weight * beam_scratch[c]
# Accumulate upper cube correlations
beam_scratch[:] = fbeam[gl0, gm0, gc1, :]
weight = (one - ld)*(one - md)*inv_nud
for c in range(ncorrs):
absc_sum[c] += weight * np.abs(beam_scratch[c])
corr_sum[c] += weight * beam_scratch[c]
beam_scratch[:] = fbeam[gl1, gm0, gc1, :]
weight = ld*(one - md)*inv_nud
for c in range(ncorrs):
absc_sum[c] += weight * np.abs(beam_scratch[c])
corr_sum[c] += weight * beam_scratch[c]
beam_scratch[:] = fbeam[gl0, gm1, gc1, :]
weight = (one - ld)*md*inv_nud
for c in range(ncorrs):
absc_sum[c] += weight * np.abs(beam_scratch[c])
corr_sum[c] += weight * beam_scratch[c]
beam_scratch[:] = fbeam[gl1, gm1, gc1, :]
weight = ld*md*inv_nud
for c in range(ncorrs):
absc_sum[c] += weight * np.abs(beam_scratch[c])
corr_sum[c] += weight * beam_scratch[c]
for c in range(ncorrs):
# Added all correlations, normalise
div = np.abs(corr_sum[c])
if div == 0.0:
# This case probably works out to a zero assign
corr_sum[c] *= absc_sum[c]
else:
corr_sum[c] *= absc_sum[c] / div
# Assign normalised values
fjones[s, t, a, f, :] = corr_sum
return fjones.reshape((nsrc, ntime, nants, nchan) + corrs)
BEAM_CUBE_DOCS = DocstringTemplate(
r"""
Evaluates Direction Dependent Effects along a source's path
by interpolating the values of a complex beam cube
at the source location.
Notes
-----
1. Sources are clamped to the provided `beam_lm_extents`.
2. Frequencies outside the cube (i.e. outside beam_freq_map)
introduce linear scaling to the lm coordinates of a source.
Parameters
----------
beam : $(array_type)
Complex beam cube of
shape :code:`(beam_lw, beam_mh, beam_nud, corr, corr)`.
`beam_lw`, `beam_mh` and `beam_nud` define the size
of the cube in the l, m and frequency dimensions, respectively.
beam_lm_extents : $(array_type)
lm extents of the beam cube of shape :code:`(2, 2)`.
``[[lower_l, upper_l], [lower_m, upper_m]]``.
beam_freq_map : $(array_type)
Beam frequency map of shape :code:`(beam_nud,)`.
This array is used to define interpolation along
the :code:`(chan,)` dimension.
lm : $(array_type)
Source lm coordinates of shape :code:`(source, 2)`.
These coordinates are:
1. Scaled if the associated frequency lies outside the beam cube.
2. Offset by pointing errors: ``point_errors``
3. Rotated by parallactic angles: ``parallactic_angles``.
4. Scaled by antenna scaling factors: ``antenna_scaling``.
parallactic_angles : $(array_type)
Parallactic angles of shape :code:`(time, ant)`.
point_errors : $(array_type)
Pointing errors of shape :code:`(time, ant, chan, 2)`.
antenna_scaling : $(array_type)
Antenna scaling factors of shape :code:`(ant, chan, 2)`
frequency : $(array_type)
Frequencies of shape :code:`(chan,)`.
Returns
-------
ddes : $(array_type)
Direction Dependent Effects of shape
:code:`(source, time, ant, chan, corr, corr)`
""")
try:
beam_cube_dde.__doc__ = BEAM_CUBE_DOCS.substitute(
array_type=":class:`numpy.ndarray`")
except AttributeError:
pass
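if __name__ == "__main__":
    # Smoke-test sketch (not part of the library): random inputs with the shapes
    # documented in BEAM_CUBE_DOCS above, purely to illustrate the calling
    # convention. All dimension sizes and values are arbitrary.
    nsrc, ntime, nants, nchan = 3, 2, 4, 5
    beam_lw = beam_mh = beam_nud = 8
    corrs = (2, 2)
    beam_shape = (beam_lw, beam_mh, beam_nud) + corrs
    beam = np.random.random(beam_shape) + 1j * np.random.random(beam_shape)
    beam_lm_extents = np.array([[-1.0, 1.0], [-1.0, 1.0]])
    beam_freq_map = np.linspace(0.856e9, 1.712e9, beam_nud)
    lm = np.random.uniform(-0.5, 0.5, (nsrc, 2))
    parallactic_angles = np.random.uniform(0.0, np.pi, (ntime, nants))
    point_errors = np.random.uniform(-1e-3, 1e-3, (ntime, nants, nchan, 2))
    antenna_scaling = np.ones((nants, nchan, 2))
    frequency = np.linspace(0.856e9, 1.712e9, nchan)
    ddes = beam_cube_dde(beam, beam_lm_extents, beam_freq_map, lm,
                         parallactic_angles, point_errors, antenna_scaling,
                         frequency)
    print(ddes.shape)  # (nsrc, ntime, nants, nchan) + corrs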
|
PypiClean
|
/local_llama_index-0.5.11.tar.gz/local_llama_index-0.5.11/local_llama_index/prompts/prompts.py
|
from typing import List
from local_llama_index.prompts.base import Prompt
from local_llama_index.prompts.prompt_type import PromptType
class SummaryPrompt(Prompt):
"""Summary prompt.
Prompt to summarize the provided `context_str`.
Required template variables: `context_str`
Args:
template (str): Template for the prompt.
**prompt_kwargs: Keyword arguments for the prompt.
"""
prompt_type: PromptType = PromptType.SUMMARY
input_variables: List[str] = ["context_str"]
class TreeInsertPrompt(Prompt):
"""Tree Insert prompt.
Prompt to insert a new chunk of text `new_chunk_text` into the tree index.
More specifically, this prompt has the LLM select the relevant candidate
child node to continue tree traversal.
Required template variables: `num_chunks`, `context_list`, `new_chunk_text`
Args:
template (str): Template for the prompt.
**prompt_kwargs: Keyword arguments for the prompt.
"""
prompt_type: PromptType = PromptType.TREE_INSERT
input_variables: List[str] = ["num_chunks", "context_list", "new_chunk_text"]
class TreeSelectPrompt(Prompt):
"""Tree select prompt.
Prompt to select a candidate child node out of all child nodes
provided in `context_list`, given a query `query_str`. `num_chunks` is
the number of child nodes in `context_list`.
Required template variables: `num_chunks`, `context_list`, `query_str`
Args:
template (str): Template for the prompt.
**prompt_kwargs: Keyword arguments for the prompt.
"""
prompt_type: PromptType = PromptType.TREE_SELECT
input_variables: List[str] = ["num_chunks", "context_list", "query_str"]
class TreeSelectMultiplePrompt(Prompt):
"""Tree select multiple prompt.
Prompt to select multiple candidate child nodes out of all
child nodes provided in `context_list`, given a query `query_str`.
`branching_factor` refers to the number of child nodes to select, and
`num_chunks` is the number of child nodes in `context_list`.
Required template variables: `num_chunks`, `context_list`, `query_str`,
`branching_factor`
Args:
template (str): Template for the prompt.
**prompt_kwargs: Keyword arguments for the prompt.
"""
prompt_type = PromptType.TREE_SELECT_MULTIPLE
input_variables: List[str] = [
"num_chunks",
"context_list",
"query_str",
"branching_factor",
]
class RefinePrompt(Prompt):
"""Refine prompt.
Prompt to refine an existing answer `existing_answer` given a context `context_msg`,
and a query `query_str`.
Required template variables: `query_str`, `existing_answer`, `context_msg`
Args:
template (str): Template for the prompt.
**prompt_kwargs: Keyword arguments for the prompt.
"""
# TODO: rename context_msg to context_str
prompt_type: PromptType = PromptType.REFINE
input_variables: List[str] = ["query_str", "existing_answer", "context_msg"]
class QuestionAnswerPrompt(Prompt):
"""Question Answer prompt.
Prompt to answer a question `query_str` given a context `context_str`.
Required template variables: `context_str`, `query_str`
Args:
template (str): Template for the prompt.
**prompt_kwargs: Keyword arguments for the prompt.
"""
prompt_type: PromptType = PromptType.QUESTION_ANSWER
input_variables: List[str] = ["context_str", "query_str"]
class KeywordExtractPrompt(Prompt):
"""Keyword extract prompt.
Prompt to extract keywords from a text `text` with a maximum of
`max_keywords` keywords.
Required template variables: `text`, `max_keywords`
Args:
template (str): Template for the prompt.
**prompt_kwargs: Keyword arguments for the prompt.
"""
prompt_type: PromptType = PromptType.KEYWORD_EXTRACT
input_variables: List[str] = ["text", "max_keywords"]
class QueryKeywordExtractPrompt(Prompt):
"""Query keyword extract prompt.
Prompt to extract keywords from a query `query_str` with a maximum
of `max_keywords` keywords.
Required template variables: `query_str`, `max_keywords`
Args:
template (str): Template for the prompt.
**prompt_kwargs: Keyword arguments for the prompt.
"""
prompt_type: PromptType = PromptType.QUERY_KEYWORD_EXTRACT
input_variables: List[str] = ["question", "max_keywords"]
class SchemaExtractPrompt(Prompt):
"""Schema extract prompt.
Prompt to extract schema from unstructured text `text`.
Required template variables: `text`, `schema`
Args:
template (str): Template for the prompt.
**prompt_kwargs: Keyword arguments for the prompt.
"""
prompt_type: PromptType = PromptType.SCHEMA_EXTRACT
input_variables: List[str] = ["text", "schema"]
class TextToSQLPrompt(Prompt):
"""Text to SQL prompt.
Prompt to translate a natural language query into SQL in the dialect
`dialect` given a schema `schema`.
Required template variables: `query_str`, `schema`, `dialect`
Args:
template (str): Template for the prompt.
**prompt_kwargs: Keyword arguments for the prompt.
"""
prompt_type: PromptType = PromptType.TEXT_TO_SQL
input_variables: List[str] = ["query_str", "schema", "dialect"]
class TableContextPrompt(Prompt):
"""Table context prompt.
Prompt to generate a table context given a table schema `schema`,
as well as unstructured text context `context_str`, and
a task `query_str`.
This includes both a high-level description of the table
as well as a description of each column in the table.
Args:
template (str): Template for the prompt.
**prompt_kwargs: Keyword arguments for the prompt.
"""
prompt_type: PromptType = PromptType.TABLE_CONTEXT
input_variables: List[str] = ["schema", "context_str", "query_str"]
class RefineTableContextPrompt(Prompt):
"""Refine Table context prompt.
Prompt to refine a table context given a table schema `schema`,
as well as unstructured text context `context_msg`, and
a task `query_str`.
This includes both a high-level description of the table
as well as a description of each column in the table.
Args:
template (str): Template for the prompt.
**prompt_kwargs: Keyword arguments for the prompt.
"""
# TODO: rename context_msg to context_str
prompt_type: PromptType = PromptType.TABLE_CONTEXT
input_variables: List[str] = [
"schema",
"context_msg",
"query_str",
"existing_answer",
]
class KnowledgeGraphPrompt(Prompt):
"""Define the knowledge graph triplet extraction prompt."""
prompt_type: PromptType = PromptType.KNOWLEDGE_TRIPLET_EXTRACT
input_variables: List[str] = ["max_knowledge_triplets", "text"]
class SimpleInputPrompt(Prompt):
"""Simple Input prompt.
Required template variables: `query_str`.
Args:
template (str): Template for the prompt.
**prompt_kwargs: Keyword arguments for the prompt.
"""
prompt_type: PromptType = PromptType.SIMPLE_INPUT
input_variables: List[str] = ["query_str"]
class PandasPrompt(Prompt):
"""Pandas prompt. Convert query to python code.
Required template variables: `query_str`, `df_str`, `instruction_str`.
Args:
template (str): Template for the prompt.
**prompt_kwargs: Keyword arguments for the prompt.
"""
prompt_type: PromptType = PromptType.PANDAS
input_variables: List[str] = ["query_str", "df_str", "instruction_str"]
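if __name__ == "__main__":
    # Usage sketch (assumes, per the class docstrings above, that the base Prompt
    # constructor takes the template string as its first positional argument).
    qa_template = (
        "Context information is below.\n"
        "---------------------\n"
        "{context_str}\n"
        "---------------------\n"
        "Given the context information, answer the question: {query_str}\n"
    )
    qa_prompt = QuestionAnswerPrompt(qa_template)
    print(qa_prompt.prompt_type, qa_prompt.input_variables)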
|
PypiClean
|
/python-ffmpeg-2.0.4.tar.gz/python-ffmpeg-2.0.4/README.md
|
# python-ffmpeg
A python binding for FFmpeg which provides sync and async APIs
## Help
See [documentation](https://python-ffmpeg.readthedocs.io) for more details.
## Install
To install **python-ffmpeg**, simply use pip:
```console
$ pip install python-ffmpeg
```
## Examples
You can find more examples in [the documentation](https://python-ffmpeg.readthedocs.io/).
### Transcoding
#### Synchronous API
```python
from ffmpeg import FFmpeg, Progress
def main():
ffmpeg = (
FFmpeg()
.option("y")
.input("input.mp4")
.output(
"ouptut.mp4",
{"codec:v": "libx264"},
vf="scale=1280:-1",
preset="veryslow",
crf=24,
)
)
ffmpeg.execute()
if __name__ == "__main__":
main()
```
#### Asynchronous API
``` python
import asyncio
from ffmpeg import Progress
from ffmpeg.asyncio import FFmpeg
async def main():
ffmpeg = (
FFmpeg()
.option("y")
.input("input.mp4")
.output(
"ouptut.mp4",
{"codec:v": "libx264"},
vf="scale=1280:-1",
preset="veryslow",
crf=24,
)
)
await ffmpeg.execute()
if __name__ == "__main__":
asyncio.run(main())
```
### Recording
#### Synchronous API
```python
from ffmpeg import FFmpeg, Progress
def main():
ffmpeg = (
FFmpeg()
.option("y")
.input(
"rtsp://username:[email protected]/cam",
rtsp_transport="tcp",
rtsp_flags="prefer_tcp",
)
.output("output.mp4", vcodec="copy")
)
@ffmpeg.on("progress")
def time_to_terminate(progress: Progress):
if progress.frame > 200:
ffmpeg.terminate()
ffmpeg.execute()
if __name__ == "__main__":
main()
```
#### Asynchronous API
``` python
import asyncio
from ffmpeg import Progress
from ffmpeg.asyncio import FFmpeg
async def main():
ffmpeg = (
FFmpeg()
.option("y")
.input(
"rtsp://username:[email protected]/cam",
rtsp_transport="tcp",
rtsp_flags="prefer_tcp",
)
.output("output.mp4", vcodec="copy")
)
@ffmpeg.on("progress")
def time_to_terminate(progress: Progress):
if progress.frame > 200:
ffmpeg.terminate()
await ffmpeg.execute()
if __name__ == "__main__":
asyncio.run(main())
```
|
PypiClean
|
/plone.patternslib-1.3.0-py3-none-any.whl/plone/patternslib/static/components/patternslib/docs/developer/create-a-pattern.md
|
# How to create a new pattern
This document provides a quick tutorial on how to create a new Patternslib pattern.
Patterns are implemented as javascript objects that are registered with the Patternslib library.
## Creating a colorchanger pattern
In this tutorial we will create a new pattern called pat-colorchanger.
This pattern changes the text-color of an element after waiting for 3 seconds.
### Creating the pattern directory
To start off, let's create a new directory in which we will put our pattern's
files, and then let's navigate into it.
```
mkdir pat-colorchanger
cd pat-colorchanger
```
### Using the Yeoman generator
Instead of manually typing out the code shown in this tutorial, you can simply
use the [Yeoman Patternslib generator](https://www.npmjs.com/package/generator-patternslib) to generate the appropriate skeleton for you.
If [Yeoman](http://yeoman.io/) is not installed, you can get it via npm:
sudo npm install -g yo
Then, simply run the following commands inside the `pat-colorchanger`
directory you created in the previous section.
sudo npm install -g generator-patternslib
yo patternslib pat-colorchanger
In this example we create the pattern pat-colorchanger for demonstration
purposes; you will of course choose a more appropriate
name for your own pattern.
### The directory layout
Each pattern should have a certain layout. Look for example at [pat-pickadate](https://github.com/Patternslib/pat-pickadate).
There is one subdirectory, called *src*, inside the *pat-pickadate* repository.
It contains the pattern's actual Javascript source file(s).
The Yeoman generator will create the correct layout and all the necessary
files.
However, if you are doing this manually instead of using Yeoman, then create this directory as well as the files required:
touch README.md index.html src/pat-colorchanger.js
## Determining the HTML markup for the pattern
Patterns are configured via a declarative HTML syntax.
Usually a particular pattern is invoked by specifying its name as an HTML class on a DOM object.
The invoked pattern then acts upon that specific DOM element. In our example, the pattern
changes the text color after 3 seconds. This color change is applied to the DOM
element on which the pattern is declared.
The pattern can be configured by specifying HTML5 data attributes, which start with the
`data-` prefix, followed by the pattern's name.
So in our case, that is `data-pat-colorchanger`.
For example:
<p class="pat-colorchanger" data-pat-colorchanger="color: blue" style="color: red">
This text will turn from red into blue after 3 seconds.
</p>
The HTML markup as shown above, which illustrates how your pattern functions, should be put
inside the `index.html` file. This file can then be used by designers and integrators
to demo the pattern's functionality.
When you are designing your pattern, you need to decide on a relevant name for it,
and how it should be configured.
For a reference of all the ways a pattern could be configured, please read the
[Parameters](../parameter-syntax/#main-content) page.
## Writing the pattern's javascript
We are now ready to start writing the Javascript for our pattern.
Put this code into `./src/pat-colorchanger.js`
```
(function (root, factory) {
if (typeof define === 'function' && define.amd) {
// Make this module AMD (Asynchronous Module Definition) compatible,
// so that it can be used with Require.js or other module loaders.
define([
"pat-registry",
"pat-parser",
"pat-base"
], function() {
return factory.apply(this, arguments);
});
} else {
// A module loader is not available. In this case, we need the
// patterns library to be available as a global variable "patterns"
factory(root.patterns, root.patterns.Parser, root.patterns.Base);
}
}(this, function(registry, Parser, Base) {
// This is the actual module and in here we put the code for the pattern.
// Tell the interpreter to execute in "strict" mode.
// For more info: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Strict_mode
"use strict";
/* We instantiate a new Parser instance, which will parse HTML markup
* looking for configuration settings for this pattern.
*
* This example pattern's name is pat-colorchanger. It is activated on a DOM
* element by giving the element the HTML class "pat-colorchanger".
*
* The pattern can be configured by specifying an HTML5 data attribute
* "data-pat-colorchanger" which contains the configuration parameters
* Only configuration parameters specified here are valid.
*
* For example:
* <p class="pat-colorchanger" data-pat-colorchanger="color: blue">Hello World</p>
*/
var parser = new Parser("colorchanger");
parser.addArgument("color", "red"); // A configuration parameter and its default value.
// We now create and return our custom pattern.
// We extend the Base pattern so our custom pattern will be automatically registered.
return Base.extend({
name: "colorchanger",
// Most patterns deal with markup: they are activated for content that matches
// a specific CSS selector. This is handled by adding two items to the
// pattern specification: a trigger attribute and an init function.
trigger: ".pat-colorchanger", // The CSS selector that triggers this pattern
// Patterns can also act as jQuery plugins.
// An example of calling the plugin: $("div").patColorchanger()
jquery_plugin: true,
init: function patExampleInit($el, opts) {
// $el is the DOM element on which the pattern is declared.
// It gets passed in to init, but is also available on the
// pattern itself, just call this.$el.
var options = parser.parse($el, opts); // Parse the DOM element to retrieve the
// configuration settings.
setTimeout($.proxy(function () {
this.setColor($el, options);
}, this), 3000);
},
setColor: function patExampleSetColor($el, options) {
$el.css("color", options.color);
}
});
}));
```
This pattern can be loaded directly in your browser after a standard Patterns bundle has been loaded.
```
<html>
<body>
<script src="patterns-2.0.0.js"></script>
<script src="/src/pat-colorchanger.js"></script>
</body>
</html>
```
There is a general rule that patterns should only trigger for elements that
have a `pat-<pattern name>` class. This is reflected in the ``trigger`` for our
pattern: it specifies that this pattern applies to any DOM element with the
`pat-colorchanger` class.
When the page loads (and also when content is injected via AJAX) the ``init``
function of our pattern will be called once for each matched DOM element.
|
PypiClean
|
/imu4gopigo3ros-0.4-py3-none-any.whl/rosBNO055.py
|
from __future__ import print_function
from __future__ import division
import di_i2c
import time
# Constants
# I2C addresses
ADDRESS_A = 0x28
ADDRESS_B = 0x29
ID = 0xA0
# Page id register definition
REG_PAGE_ID = 0x07
# PAGE0 REGISTER DEFINITION START
REG_CHIP_ID = 0x00
REG_ACCEL_REV_ID = 0x01
REG_MAG_REV_ID = 0x02
REG_GYRO_REV_ID = 0x03
REG_SW_REV_ID_LSB = 0x04
REG_SW_REV_ID_MSB = 0x05
REG_BL_REV_ID = 0x06
# Accel data register
REG_ACCEL_DATA_X_LSB = 0x08
REG_ACCEL_DATA_X_MSB = 0x09
REG_ACCEL_DATA_Y_LSB = 0x0A
REG_ACCEL_DATA_Y_MSB = 0x0B
REG_ACCEL_DATA_Z_LSB = 0x0C
REG_ACCEL_DATA_Z_MSB = 0x0D
# Mag data register
REG_MAG_DATA_X_LSB = 0x0E
REG_MAG_DATA_X_MSB = 0x0F
REG_MAG_DATA_Y_LSB = 0x10
REG_MAG_DATA_Y_MSB = 0x11
REG_MAG_DATA_Z_LSB = 0x12
REG_MAG_DATA_Z_MSB = 0x13
# Gyro data registers
REG_GYRO_DATA_X_LSB = 0x14
REG_GYRO_DATA_X_MSB = 0x15
REG_GYRO_DATA_Y_LSB = 0x16
REG_GYRO_DATA_Y_MSB = 0x17
REG_GYRO_DATA_Z_LSB = 0x18
REG_GYRO_DATA_Z_MSB = 0x19
# Euler data registers
REG_EULER_H_LSB = 0x1A
REG_EULER_H_MSB = 0x1B
REG_EULER_R_LSB = 0x1C
REG_EULER_R_MSB = 0x1D
REG_EULER_P_LSB = 0x1E
REG_EULER_P_MSB = 0x1F
# Quaternion data registers
REG_QUATERNION_DATA_W_LSB = 0x20
REG_QUATERNION_DATA_W_MSB = 0x21
REG_QUATERNION_DATA_X_LSB = 0x22
REG_QUATERNION_DATA_X_MSB = 0x23
REG_QUATERNION_DATA_Y_LSB = 0x24
REG_QUATERNION_DATA_Y_MSB = 0x25
REG_QUATERNION_DATA_Z_LSB = 0x26
REG_QUATERNION_DATA_Z_MSB = 0x27
# Linear acceleration data registers
REG_LINEAR_ACCEL_DATA_X_LSB = 0x28
REG_LINEAR_ACCEL_DATA_X_MSB = 0x29
REG_LINEAR_ACCEL_DATA_Y_LSB = 0x2A
REG_LINEAR_ACCEL_DATA_Y_MSB = 0x2B
REG_LINEAR_ACCEL_DATA_Z_LSB = 0x2C
REG_LINEAR_ACCEL_DATA_Z_MSB = 0x2D
# Gravity data registers
REG_GRAVITY_DATA_X_LSB = 0x2E
REG_GRAVITY_DATA_X_MSB = 0x2F
REG_GRAVITY_DATA_Y_LSB = 0x30
REG_GRAVITY_DATA_Y_MSB = 0x31
REG_GRAVITY_DATA_Z_LSB = 0x32
REG_GRAVITY_DATA_Z_MSB = 0x33
# Temperature data register
REG_TEMP = 0x34
# Status registers
REG_CALIB_STAT = 0x35
REG_SELFTEST_RESULT = 0x36
REG_INTR_STAT = 0x37
REG_SYS_CLK_STAT = 0x38
REG_SYS_STAT = 0x39
REG_SYS_ERR = 0x3A
# Unit selection register
REG_UNIT_SEL = 0x3B
UNIT_SEL_ACC = 0x01
UNIT_SEL_GYR = 0x02
UNIT_SEL_EUL = 0x04
UNIT_SEL_TEMP = 0x10
UNIT_SEL_ORI = 0x80
REG_DATA_SELECT = 0x3C
# Mode registers
REG_OPR_MODE = 0x3D
REG_PWR_MODE = 0x3E
REG_SYS_TRIGGER = 0x3F
REG_TEMP_SOURCE = 0x40
# Axis remap registers
REG_AXIS_MAP_CONFIG = 0x41
REG_AXIS_MAP_SIGN = 0x42
# Axis remap values
AXIS_REMAP_X = 0x00
AXIS_REMAP_Y = 0x01
AXIS_REMAP_Z = 0x02
AXIS_REMAP_POSITIVE = 0x00
AXIS_REMAP_NEGATIVE = 0x01
# SIC registers
REG_SIC_MATRIX_0_LSB = 0x43
REG_SIC_MATRIX_0_MSB = 0x44
REG_SIC_MATRIX_1_LSB = 0x45
REG_SIC_MATRIX_1_MSB = 0x46
REG_SIC_MATRIX_2_LSB = 0x47
REG_SIC_MATRIX_2_MSB = 0x48
REG_SIC_MATRIX_3_LSB = 0x49
REG_SIC_MATRIX_3_MSB = 0x4A
REG_SIC_MATRIX_4_LSB = 0x4B
REG_SIC_MATRIX_4_MSB = 0x4C
REG_SIC_MATRIX_5_LSB = 0x4D
REG_SIC_MATRIX_5_MSB = 0x4E
REG_SIC_MATRIX_6_LSB = 0x4F
REG_SIC_MATRIX_6_MSB = 0x50
REG_SIC_MATRIX_7_LSB = 0x51
REG_SIC_MATRIX_7_MSB = 0x52
REG_SIC_MATRIX_8_LSB = 0x53
REG_SIC_MATRIX_8_MSB = 0x54
# Accelerometer Offset registers
REG_ACCEL_OFFSET_X_LSB = 0x55
REG_ACCEL_OFFSET_X_MSB = 0x56
REG_ACCEL_OFFSET_Y_LSB = 0x57
REG_ACCEL_OFFSET_Y_MSB = 0x58
REG_ACCEL_OFFSET_Z_LSB = 0x59
REG_ACCEL_OFFSET_Z_MSB = 0x5A
# Magnetometer Offset registers
REG_MAG_OFFSET_X_LSB = 0x5B
REG_MAG_OFFSET_X_MSB = 0x5C
REG_MAG_OFFSET_Y_LSB = 0x5D
REG_MAG_OFFSET_Y_MSB = 0x5E
REG_MAG_OFFSET_Z_LSB = 0x5F
REG_MAG_OFFSET_Z_MSB = 0x60
# Gyroscope Offset registers
REG_GYRO_OFFSET_X_LSB = 0x61
REG_GYRO_OFFSET_X_MSB = 0x62
REG_GYRO_OFFSET_Y_LSB = 0x63
REG_GYRO_OFFSET_Y_MSB = 0x64
REG_GYRO_OFFSET_Z_LSB = 0x65
REG_GYRO_OFFSET_Z_MSB = 0x66
# Radius registers
REG_ACCEL_RADIUS_LSB = 0x67
REG_ACCEL_RADIUS_MSB = 0x68
REG_MAG_RADIUS_LSB = 0x69
REG_MAG_RADIUS_MSB = 0x6A
# Power modes
POWER_MODE_NORMAL = 0x00
POWER_MODE_LOWPOWER = 0x01
POWER_MODE_SUSPEND = 0x02
# Operation mode settings
OPERATION_MODE_CONFIG = 0x00
OPERATION_MODE_ACCONLY = 0x01
OPERATION_MODE_MAGONLY = 0x02
OPERATION_MODE_GYRONLY = 0x03
OPERATION_MODE_ACCMAG = 0x04
OPERATION_MODE_ACCGYRO = 0x05
OPERATION_MODE_MAGGYRO = 0x06
OPERATION_MODE_AMG = 0x07
OPERATION_MODE_IMUPLUS = 0x08
OPERATION_MODE_COMPASS = 0x09
OPERATION_MODE_M4G = 0x0A
OPERATION_MODE_NDOF_FMC_OFF = 0x0B
OPERATION_MODE_NDOF = 0x0C
class BNO055(object):
def __init__(self, bus = "RPI_1SW", address = ADDRESS_A, mode = OPERATION_MODE_NDOF, units = 0, init = True, verbose=False):
"""
Initialize the object and optionally the hardware sensor
Keyword arguments:
bus (default "RPI_1SW") -- The I2C bus
address (default ADDRESS_A) -- The BNO055 I2C address
mode (default OPERATION_MODE_NDOF) -- The operation mode
units (default 0) -- The value unit selection bits
init (default True) -- False initializes software object only, does not alter hardware configuration.
"""
if verbose:
if (init == True):
print("BNO055 Instantiating on BUS {} with ADDRESS {} to MODE {} using UNITS {} HW INIT {}".format(bus, address, mode, units, init))
else:
print("BNO055 Instantiating on BUS {} with ADDRESS {} using UNITS {} HW INIT {}".format(bus, address, units, init))
# create an I2C bus object and set the address
self.i2c_bus = di_i2c.DI_I2C(bus = bus, address = address)
if init==True:
# Save desired operation mode
self._mode = mode
# Send a throw-away command and ignore any response or I2C errors
# just to make sure the BNO055 is in a good state and ready to accept
# commands (this seems to be necessary after a hard power down).
try:
self.i2c_bus.write_reg_8(REG_PAGE_ID, 0)
except IOError:
# pass on an I2C IOError
pass
# switch to config mode
self._config_mode(verbose=verbose)
self.i2c_bus.write_reg_8(REG_PAGE_ID, 0)
# check the chip ID
if ID != self.i2c_bus.read_8(REG_CHIP_ID):
raise RuntimeError("BNO055 failed to respond")
if self.i2c_bus.read_8(REG_TEMP_SOURCE) != 0x01:
if verbose: print("Doing init")
# reset the device using the reset command
self.i2c_bus.write_reg_8(REG_SYS_TRIGGER, 0x20)
# wait 650ms after reset for chip to be ready (recommended in datasheet)
time.sleep(0.65)
# set to normal power mode
self.i2c_bus.write_reg_8(REG_PWR_MODE, POWER_MODE_NORMAL)
# default to internal oscillator
self.i2c_bus.write_reg_8(REG_SYS_TRIGGER, 0x00)
# set temperature source to gyroscope, as it seems to be more accurate.
self.i2c_bus.write_reg_8(REG_TEMP_SOURCE, 0x01)
else:
pass
if verbose: print("Skipping init")
# set the unit selection bits
self.i2c_bus.write_reg_8(REG_UNIT_SEL, units)
# set temperature source to gyroscope, as it seems to be more accurate.
self.i2c_bus.write_reg_8(REG_TEMP_SOURCE, 0x01)
# switch to normal operation mode
self._operation_mode(verbose=verbose)
else: # init = False
self._mode = self.get_operation_mode()
if verbose: print("Set _mode to:",self._mode)
if verbose: print("BNO055 Instantiation Complete")
def _config_mode(self, verbose=False):
# switch to configuration mode
#self.set_mode(OPERATION_MODE_CONFIG)
mode = OPERATION_MODE_CONFIG
if verbose: print("config_mode: {}".format(mode))
self.i2c_bus.write_reg_8(REG_OPR_MODE, mode & 0xFF)
# delay for 30 milliseconds according to datasheet
time.sleep(0.03)
def _operation_mode(self,verbose=False):
# switch to operation mode (to read sensor data)
self.set_mode(self._mode,verbose=verbose)
def set_mode(self, mode, verbose=False):
"""Set operation mode for the sensor
Keyword arguments:
mode -- the operation mode. See BNO055 datasheet tables 3-3 and 3-5."""
if verbose: print("set_mode: {}".format(mode))
self.i2c_bus.write_reg_8(REG_OPR_MODE, mode & 0xFF)
# delay for 30 milliseconds according to datasheet
time.sleep(0.03)
self._mode = mode
def get_revision(self):
"""Get revision numbers
Returns a tuple with revision numbers for Software revision, Bootloader
version, Accelerometer ID, Magnetometer ID, and Gyro ID."""
# Read revision values.
accel = self.i2c_bus.read_8(REG_ACCEL_REV_ID)
mag = self.i2c_bus.read_8(REG_MAG_REV_ID)
gyro = self.i2c_bus.read_8(REG_GYRO_REV_ID)
bl = self.i2c_bus.read_8(REG_BL_REV_ID)
sw_lsb = self.i2c_bus.read_8(REG_SW_REV_ID_LSB)
sw_msb = self.i2c_bus.read_8(REG_SW_REV_ID_MSB)
sw = ((sw_msb << 8) | sw_lsb) & 0xFFFF
# Return the results as a tuple of all 5 values.
return (sw, bl, accel, mag, gyro)
def set_external_crystal(self, external_crystal):
"""Set the BNO055 to use the internal/external oscillator
Keyword arguments:
external_crystal -- use external crystal?"""
# Switch to configuration mode.
self._config_mode()
# Set the clock bit appropriately in the SYS_TRIGGER register.
if external_crystal:
self.i2c_bus.write_reg_8(REG_SYS_TRIGGER, 0x80)
else:
self.i2c_bus.write_reg_8(REG_SYS_TRIGGER, 0x00)
# Go back to normal operation mode.
self._operation_mode()
def get_system_status(self, run_self_test = True):
"""Get the sensor system status
Keyword arguments:
run_self_test (default True) -- Run a self test? This will make the sensor go into config mode which will stop the fusion engine.
Returns a tuple with status information. Three values will be returned:
- System status register value with the following meaning:
0 = Idle
1 = System Error
2 = Initializing Peripherals
3 = System Initialization
4 = Executing Self-Test
5 = Sensor fusion algorithm running
6 = System running without fusion algorithms
- Self test result register value with the following meaning:
Bit value: 1 = test passed, 0 = test failed
Bit 0 = Accelerometer self test
Bit 1 = Magnetometer self test
Bit 2 = Gyroscope self test
Bit 3 = MCU self test
Value of 0x0F = all good!
- System error register value with the following meaning:
0 = No error
1 = Peripheral initialization error
2 = System initialization error
3 = Self test result failed
4 = Register map value out of range
5 = Register map address out of range
6 = Register map write error
7 = BNO low power mode not available for selected operation mode
8 = Accelerometer power mode not available
9 = Fusion algorithm configuration error
10 = Sensor configuration error
"""
# run a self test?
if run_self_test:
# Switch to configuration mode if running self test.
self._config_mode()
# Perform a self test.
sys_trigger = self.i2c_bus.read_8(REG_SYS_TRIGGER)
self.i2c_bus.write_reg_8(REG_SYS_TRIGGER, sys_trigger | 0x1)
# Wait for self test to finish.
time.sleep(1.0)
# Read test result.
self_test = self.i2c_bus.read_8(REG_SELFTEST_RESULT)
# Go back to operation mode.
self._operation_mode()
else:
self_test = None
# read status and error values
status = self.i2c_bus.read_8(REG_SYS_STAT)
error = self.i2c_bus.read_8(REG_SYS_ERR)
# return the results as a tuple of all 3 values
return (status, self_test, error)
def get_calibration_status(self):
"""
Get calibration status of the `InertialMeasurementUnit Sensor`_.
The moment the sensor is powered, this method should be called almost continuously until the sensor is fully calibrated.
For calibrating the sensor faster, it's enough to hold the sensor for a couple of seconds on each "face" of an imaginary cube.
For each component of the system, there is a number that says how much the component has been calibrated:
* **System**, ``3`` = fully calibrated, ``0`` = not calibrated.
* **Gyroscope**, ``3`` = fully calibrated, ``0`` = not calibrated.
* **Accelerometer**, ``3`` = fully calibrated, ``0`` = not calibrated.
* **Magnetometer**, ``3`` = fully calibrated, ``0`` = not calibrated.
:returns: A tuple where each member shows how much a component of the IMU is calibrated. See the above description of the method.
:rtype: (int,int,int,int)
:raises ~exceptions.OSError: When the `InertialMeasurementUnit Sensor`_ is not reachable.
.. important::
The sensor needs a new calibration each time it's powered up.
"""
# Return the calibration status register value.
cal_status = self.i2c_bus.read_8(REG_CALIB_STAT)
sys = (cal_status >> 6) & 0x03
gyro = (cal_status >> 4) & 0x03
accel = (cal_status >> 2) & 0x03
mag = cal_status & 0x03
# Return the results as a tuple of all 4 values.
return (sys, gyro, accel, mag)
def get_calibration(self):
"""Get calibration data
Returns the sensor's calibration data as an array of 22 bytes.
Can be saved and then reloaded with set_calibration to quickly
calibrate from a previously calculated set of calibration data.
"""
# Switch to configuration mode, as mentioned in section 3.10.4 of datasheet.
self._config_mode()
# Read the 22 bytes of calibration data
cal_data = self.i2c_bus.read_list(REG_ACCEL_OFFSET_X_LSB, 22)
# Go back to normal operation mode.
self._operation_mode()
return cal_data
def set_calibration(self, data):
"""Set calibration data
Keyword arguments:
data -- a 22 byte list of calibration data to write to the sensor that was previously read with get_calibration.
"""
# Check that 22 bytes were passed in with calibration data.
if data is None or len(data) != 22:
raise ValueError('set_calibration Expects a list of 22 bytes of calibration data')
# Switch to configuration mode, as mentioned in section 3.10.4 of datasheet.
self._config_mode()
# Set the 22 bytes of calibration data.
self.i2c_bus.write_reg_list(REG_ACCEL_OFFSET_X_LSB, data)
# Go back to normal operation mode.
self._operation_mode()
def get_axis_remap(self):
"""Get axis remap information
Returns a tuple with the axis remap register values. This will return
6 values with the following meaning:
- X axis remap (a value of AXIS_REMAP_X, AXIS_REMAP_Y, or AXIS_REMAP_Z.
which indicates that the physical X axis of the chip
is remapped to a different axis)
- Y axis remap (see above)
- Z axis remap (see above)
- X axis sign (a value of AXIS_REMAP_POSITIVE or AXIS_REMAP_NEGATIVE
which indicates if the X axis values should be positive/
normal or negative/inverted. The default is positive.)
- Y axis sign (see above)
- Z axis sign (see above)
Note that by default the axis orientation of the BNO chip looks like
the following (taken from section 3.4, page 24 of the datasheet). Notice
the dot in the corner that corresponds to the dot on the BNO chip:
| Z axis
|
| / X axis
____|__/____
Y axis / * | / /|
_________ /______|/ //
/___________ //
|____________|/
NOTE: DI IMU
- Y is direction of arrow head
- X is toward right side when head up looking at the chip side
- Z is coming at you when looking at the chip side
DI IMU For ROS On GoPiGo3 (No axis remap needed if mounted like this)
- Mount with chip side up, arrow head pointing to left side of bot
- X is forward
- Y is toward left side
- Z is up
"""
# Get the axis remap register value.
map_config = self.i2c_bus.read_8(REG_AXIS_MAP_CONFIG)
z = (map_config >> 4) & 0x03
y = (map_config >> 2) & 0x03
x = map_config & 0x03
# Get the axis remap sign register value.
sign_config = self.i2c_bus.read_8(REG_AXIS_MAP_SIGN)
x_sign = (sign_config >> 2) & 0x01
y_sign = (sign_config >> 1) & 0x01
z_sign = sign_config & 0x01
# Return the results as a tuple of all 6 values.
return (x, y, z, x_sign, y_sign, z_sign)
def set_axis_remap(self, x, y, z, x_sign=AXIS_REMAP_POSITIVE,
y_sign=AXIS_REMAP_POSITIVE, z_sign=AXIS_REMAP_POSITIVE, verbose=False):
"""Set axis remap
Keyword arguments:
x -- set to one of AXIS_REMAP_X, AXIS_REMAP_Y, or AXIS_REMAP_Z
y -- ''
z -- ''
x_sign -- set to AXIS_REMAP_POSITIVE or AXIS_REMAP_NEGATIVE
y_sign -- ''
z_sign -- ''
See the get_axis_remap documentation and datasheet section 3.4 for more information
"""
# Switch to configuration mode.
self._config_mode(verbose=verbose)
# Set the axis remap register value.
map_config = 0x00
map_config |= (z & 0x03) << 4
map_config |= (y & 0x03) << 2
map_config |= x & 0x03
self.i2c_bus.write_reg_8(REG_AXIS_MAP_CONFIG, map_config)
# Set the axis remap sign register value.
sign_config = 0x00
sign_config |= (x_sign & 0x01) << 2
sign_config |= (y_sign & 0x01) << 1
sign_config |= z_sign & 0x01
self.i2c_bus.write_reg_8(REG_AXIS_MAP_SIGN, sign_config)
# Go back to normal operation mode.
self._operation_mode(verbose=verbose)
def _read_vector(self, reg, count = 3):
# Read count number of 16-bit signed values starting from the provided
# register. Returns a tuple of the values that were read.
data = self.i2c_bus.read_list(reg, count*2)
result = [0]*count
for i in range(count):
result[i] = (((data[(i * 2) + 1] & 0xFF) << 8) | (data[(i * 2)] & 0xFF)) & 0xFFFF
if result[i] & 0x8000: #> 32767:
result[i] -= 0x10000 #65536
return result
def read_euler(self):
"""Read the absolute orientation
Returns the current absolute orientation as a tuple of heading, roll, and pitch euler angles in degrees."""
heading, roll, pitch = self._read_vector(REG_EULER_H_LSB)
return (heading/16.0, roll/16.0, pitch/16.0)
def read_magnetometer(self):
"""Read the magnetometer
Returns the current magnetometer reading as a tuple of X, Y, Z values in micro-Teslas."""
x, y, z = self._read_vector(REG_MAG_DATA_X_LSB)
return (x/16.0, y/16.0, z/16.0)
def read_gyroscope(self):
"""Read the gyroscope
Returns the current gyroscope (angular velocity) reading as a tuple of X, Y, Z values in degrees per second."""
(x, y, z) = self._read_vector(REG_GYRO_DATA_X_LSB)
return (x/16.0, y/16.0, z/16.0)
def read_accelerometer(self):
"""Read the accelerometer
Returns the current accelerometer reading as a tuple of X, Y, Z values in meters/second^2."""
x, y, z = self._read_vector(REG_ACCEL_DATA_X_LSB)
return (x/100.0, y/100.0, z/100.0)
def read_linear_acceleration(self):
"""Read linear acceleration
Returns the current linear acceleration (acceleration from movement not from gravity) reading as a tuple of X, Y, Z values in meters/second^2."""
x, y, z = self._read_vector(REG_LINEAR_ACCEL_DATA_X_LSB)
return (x/100.0, y/100.0, z/100.0)
def read_gravity(self):
"""Read gravity
Returns the current gravity reading as a tuple of X, Y, Z values in meters/second^2."""
x, y, z = self._read_vector(REG_GRAVITY_DATA_X_LSB)
return (x/100.0, y/100.0, z/100.0)
def read_quaternion(self):
"""Read the quaternion values
Returns the current orientation as a tuple of X, Y, Z, W quaternion values."""
w, x, y, z = self._read_vector(REG_QUATERNION_DATA_W_LSB, 4)
# Scale values, see 3.6.5.5 in the datasheet.
scale = (1.0 / (1<<14))
return (x*scale, y*scale, z*scale, w*scale)
def read_temp(self):
"""Read the temperature
Returns the current temperature in degrees celsius."""
return self.i2c_bus.read_8(REG_TEMP, signed = True)
def get_operation_mode(self):
"""Read the operation mode
"""
op_mode = self.i2c_bus.read_8(REG_OPR_MODE)
# print("op_mode: {:d}".format(op_mode))
return op_mode
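# --- Hedged usage sketch (not part of the original module) ---
# The class name and constructor are not visible in this excerpt, so the names
# below ("BNO055", default constructor) are assumptions; adapt them to the
# class actually defined earlier in this file.
#
# imu = BNO055()
# heading, roll, pitch = imu.read_euler()  # degrees
# x, y, z, w = imu.read_quaternion()       # unit quaternion components
# print(imu.get_axis_remap())              # (x, y, z, x_sign, y_sign, z_sign)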
|
PypiClean
|
/z3c.pt-4.0.tar.gz/z3c.pt-4.0/README.rst
|
========
z3c.pt
========
.. image:: https://img.shields.io/pypi/v/z3c.pt.svg
:target: https://pypi.python.org/pypi/z3c.pt/
:alt: Latest release
.. image:: https://img.shields.io/pypi/pyversions/z3c.pt.svg
:target: https://pypi.org/project/z3c.pt/
:alt: Supported Python versions
.. image:: https://github.com/zopefoundation/z3c.pt/actions/workflows/tests.yml/badge.svg
:target: https://github.com/zopefoundation/z3c.pt/actions/workflows/tests.yml
.. image:: https://coveralls.io/repos/github/zopefoundation/z3c.pt/badge.svg?branch=master
:target: https://coveralls.io/github/zopefoundation/z3c.pt?branch=master
.. image:: https://readthedocs.org/projects/z3cpt/badge/?version=latest
:target: https://z3cpt.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
This is a fast implementation of the ZPT template engine for Zope 3
which uses Chameleon to compile templates to byte-code.
The package provides application support equivalent to
``zope.pagetemplate``.
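A rough usage sketch (unverified; the exact import path and call signature of
``PageTemplate`` are assumptions here and should be checked against the
package documentation)::

    from z3c.pt.pagetemplate import PageTemplate

    template = PageTemplate('<div tal:content="options/title">placeholder</div>')
    print(template(title="Hello world"))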
|
PypiClean
|
/tensorflow_intel-2.14.0rc0-cp39-cp39-win_amd64.whl/tensorflow/python/ops/logging_ops.py
|
"""Logging and Summary Operations."""
# pylint: disable=protected-access
import collections as py_collections
import os
import pprint
import random
import sys
from absl import logging
from tensorflow.python import pywrap_tfe
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import string_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_logging_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
def enable_interactive_logging():
pywrap_tfe.TFE_Py_EnableInteractivePythonLogging()
# Register printing to the cell output if we are in a Colab or Jupyter Notebook.
try:
get_ipython() # Exists in an ipython env like Jupyter or Colab
enable_interactive_logging()
except NameError:
pass
# The python wrapper for Assert is in control_flow_ops, as the Assert
# call relies on certain conditionals for its dependencies. Use
# control_flow_ops.Assert.
# Assert and Print are special symbols in Python 2, so we must
# have an upper-case version of them. When support for it is dropped,
# we can allow lowercase.
# See https://github.com/tensorflow/tensorflow/issues/18053
# pylint: disable=invalid-name
@deprecated("2018-08-20", "Use tf.print instead of tf.Print. Note that "
"tf.print returns a no-output operator that directly "
"prints the output. Outside of defuns or eager mode, "
"this operator will not be executed unless it is "
"directly specified in session.run or used as a "
"control dependency for other operators. This is "
"only a concern in graph mode. Below is an example "
"of how to ensure tf.print executes in graph mode:\n")
@tf_export(v1=["Print"])
@dispatch.add_dispatch_support
def Print(input_, data, message=None, first_n=None, summarize=None, name=None):
"""Prints a list of tensors.
This is an identity op (behaves like `tf.identity`) with the side effect
of printing `data` when evaluating.
Note: This op prints to the standard error. It is not currently compatible
with jupyter notebook (printing to the notebook *server's* output, not into
the notebook).
@compatibility(TF2)
This API is deprecated. Use `tf.print` instead. `tf.print` does not need the
`input_` argument.
`tf.print` works in TF2 when executing eagerly and inside a `tf.function`.
In TF1-styled sessions, an explicit control dependency declaration is needed
to execute the `tf.print` operation. Refer to the documentation of
`tf.print` for more details.
@end_compatibility
Args:
input_: A tensor passed through this op.
data: A list of tensors to print out when op is evaluated.
message: A string, prefix of the error message.
first_n: Only log `first_n` number of times. Negative numbers log always;
this is the default.
summarize: Only print this many entries of each tensor. If None, then a
maximum of 3 elements are printed per input tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type and contents as `input_`.
```python
sess = tf.compat.v1.Session()
with sess.as_default():
tensor = tf.range(10)
print_op = tf.print(tensor)
with tf.control_dependencies([print_op]):
out = tf.add(tensor, tensor)
sess.run(out)
```
"""
return gen_logging_ops._print(input_, data, message, first_n, summarize, name)
# pylint: enable=invalid-name
def _generate_placeholder_string(x, default_placeholder="{}"):
"""Generate and return a string that does not appear in `x`."""
placeholder = default_placeholder
rng = random.Random(5)
while placeholder in x:
placeholder = placeholder + str(rng.randint(0, 9))
return placeholder
def _is_filepath(output_stream):
"""Returns True if output_stream is a file path."""
return isinstance(output_stream, str) and output_stream.startswith("file://")
# Temporarily disable pylint g-doc-args error to allow giving more context
# about what the kwargs are.
# Because we are using arbitrary-length positional arguments, python 2
# does not support explicitly specifying the keyword arguments in the
# function definition.
# pylint: disable=g-doc-args
@tf_export("print")
@dispatch.add_dispatch_support
def print_v2(*inputs, **kwargs):
"""Print the specified inputs.
A TensorFlow operator that prints the specified inputs to a desired
output stream or logging level. The inputs may be dense or sparse Tensors,
primitive python objects, data structures that contain tensors, and printable
Python objects. Printed tensors will recursively show the first and last
elements of each dimension to summarize.
Example:
Single-input usage:
```python
tensor = tf.range(10)
tf.print(tensor, output_stream=sys.stderr)
```
(This prints "[0 1 2 ... 7 8 9]" to sys.stderr)
Multi-input usage:
```python
tensor = tf.range(10)
tf.print("tensors:", tensor, {2: tensor * 2}, output_stream=sys.stdout)
```
(This prints "tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}" to
sys.stdout)
Changing the input separator:
```python
tensor_a = tf.range(2)
tensor_b = tensor_a * 2
tf.print(tensor_a, tensor_b, output_stream=sys.stderr, sep=',')
```
(This prints "[0 1],[0 2]" to sys.stderr)
Usage in a `tf.function`:
```python
@tf.function
def f():
tensor = tf.range(10)
tf.print(tensor, output_stream=sys.stderr)
return tensor
range_tensor = f()
```
(This prints "[0 1 2 ... 7 8 9]" to sys.stderr)
*Compatibility usage in TF 1.x graphs*:
In graphs manually created outside of `tf.function`, this method returns
the created TF operator that prints the data. To make sure the
operator runs, users need to pass the produced op to
`tf.compat.v1.Session`'s run method, or to use the op as a control
dependency for executed ops by specifying
`with tf.compat.v1.control_dependencies([print_op])`.
```python
tf.compat.v1.disable_v2_behavior() # for TF1 compatibility only
sess = tf.compat.v1.Session()
with sess.as_default():
tensor = tf.range(10)
print_op = tf.print("tensors:", tensor, {2: tensor * 2},
output_stream=sys.stdout)
with tf.control_dependencies([print_op]):
tripled_tensor = tensor * 3
sess.run(tripled_tensor)
```
(This prints "tensors: [0 1 2 ... 7 8 9] {2: [0 2 4 ... 14 16 18]}" to
sys.stdout)
Note: In Jupyter notebooks and colabs, `tf.print` prints to the notebook
cell outputs. It will not write to the notebook kernel's console logs.
Args:
*inputs: Positional arguments that are the inputs to print. Inputs in the
printed output will be separated by spaces. Inputs may be python
primitives, tensors, data structures such as dicts and lists that may
contain tensors (with the data structures possibly nested in arbitrary
ways), and printable python objects.
output_stream: The output stream, logging level, or file to print to.
Defaults to sys.stderr, but sys.stdout, tf.compat.v1.logging.info,
tf.compat.v1.logging.warning, tf.compat.v1.logging.error,
absl.logging.info, absl.logging.warning and absl.logging.error are also
supported. To print to a file, pass a string started with "file://"
followed by the file path, e.g., "file:///tmp/foo.out".
summarize: The first and last `summarize` elements within each dimension are
recursively printed per Tensor. If None, then the first 3 and last 3
elements of each dimension are printed for each tensor. If set to -1, it
will print all elements of every tensor.
sep: The string to use to separate the inputs. Defaults to " ".
end: End character that is appended at the end the printed string. Defaults
to the newline character.
name: A name for the operation (optional).
Returns:
None when executing eagerly. During graph tracing this returns
a TF operator that prints the specified inputs in the specified output
stream or logging level. This operator will be automatically executed
except inside of `tf.compat.v1` graphs and sessions.
Raises:
ValueError: If an unsupported output stream is specified.
"""
# Because we are using arbitrary-length positional arguments, python 2
# does not support explicitly specifying the keyword arguments in the
# function definition. So, we manually get the keyword arguments w/ default
# values here.
output_stream = kwargs.pop("output_stream", sys.stderr)
name = kwargs.pop("name", None)
summarize = kwargs.pop("summarize", 3)
sep = kwargs.pop("sep", " ")
end = kwargs.pop("end", os.linesep)
if kwargs:
raise ValueError("Unrecognized keyword arguments for tf.print: %s" % kwargs)
format_name = None
if name:
format_name = name + "_format"
# Match the C++ string constants representing the different output streams.
# Keep this updated!
output_stream_to_constant = {
sys.stdout: "stdout",
sys.stderr: "stderr",
tf_logging.INFO: "log(info)",
tf_logging.info: "log(info)",
tf_logging.WARN: "log(warning)",
tf_logging.warning: "log(warning)",
tf_logging.warn: "log(warning)",
tf_logging.ERROR: "log(error)",
tf_logging.error: "log(error)",
logging.INFO: "log(info)",
logging.info: "log(info)",
logging.WARNING: "log(warning)",
logging.WARN: "log(warning)",
logging.warning: "log(warning)",
logging.warn: "log(warning)",
logging.ERROR: "log(error)",
logging.error: "log(error)",
}
if _is_filepath(output_stream):
output_stream_string = output_stream
else:
output_stream_string = output_stream_to_constant.get(output_stream)
if not output_stream_string:
raise ValueError("Unsupported output stream, logging level, or file." +
str(output_stream) +
". Supported streams are sys.stdout, "
"sys.stderr, tf.logging.info, "
"tf.logging.warning, tf.logging.error. " +
"File needs to be in the form of 'file://<filepath>'.")
# If we are only printing a single string scalar, there is no need to format
if (len(inputs) == 1 and tensor_util.is_tf_type(inputs[0]) and
(not isinstance(inputs[0], sparse_tensor.SparseTensor)) and
(inputs[0].shape.ndims == 0) and (inputs[0].dtype == dtypes.string)):
formatted_string = inputs[0]
# Otherwise, we construct an appropriate template for the tensors we are
# printing, and format the template using those tensors.
else:
# For each input to this print function, we extract any nested tensors,
# and construct an appropriate template to format representing the
# printed input.
templates = []
tensors = []
# If an input to the print function is of type `OrderedDict`, sort its
# elements by the keys for consistency with the ordering of `nest.flatten`.
# This is not needed for `dict` types because `pprint.pformat()` takes care
# of printing the template in a sorted fashion.
inputs_ordered_dicts_sorted = []
for input_ in inputs:
if isinstance(input_, py_collections.OrderedDict):
inputs_ordered_dicts_sorted.append(
py_collections.OrderedDict(sorted(input_.items())))
else:
inputs_ordered_dicts_sorted.append(input_)
tensor_free_structure = nest.map_structure(
lambda x: "" if tensor_util.is_tf_type(x) else x,
inputs_ordered_dicts_sorted)
tensor_free_template = " ".join(
pprint.pformat(x) for x in tensor_free_structure)
placeholder = _generate_placeholder_string(tensor_free_template)
for input_ in inputs:
placeholders = []
# Use the nest utilities to flatten & process any nested elements in this
# input. The placeholder for a tensor in the template should be the
# placeholder string, and the placeholder for a non-tensor can just be
# the printed value of the non-tensor itself.
for x in nest.flatten(input_):
# support sparse tensors
if isinstance(x, sparse_tensor.SparseTensor):
tensors.extend([x.indices, x.values, x.dense_shape])
placeholders.append(
"SparseTensor(indices={}, values={}, shape={})".format(
placeholder, placeholder, placeholder))
elif tensor_util.is_tf_type(x):
tensors.append(x)
placeholders.append(placeholder)
else:
placeholders.append(x)
if isinstance(input_, str):
# If the current input to format/print is a normal string, that string
# can act as the template.
cur_template = input_
else:
# We pack the placeholders into a data structure that matches the
# input data structure format, then format that data structure
# into a string template.
#
# NOTE: We must use pprint.pformat here for building the template for
# unordered data structures such as `dict`, because `str` doesn't
# guarantee orderings, while pprint prints in sorted order. pprint
# will match the ordering of `nest.flatten`.
# This even works when nest.flatten reorders OrderedDicts, because
# pprint is printing *after* the OrderedDicts have been reordered.
cur_template = pprint.pformat(
nest.pack_sequence_as(input_, placeholders))
templates.append(cur_template)
# We join the templates for the various inputs into a single larger
# template. We also remove all quotes surrounding the placeholders, so that
# the formatted/printed output will not contain quotes around tensors.
# (example of where these quotes might appear: if we have added a
# placeholder string into a list, then pretty-formatted that list)
template = sep.join(templates)
template = template.replace("'" + placeholder + "'", placeholder)
formatted_string = string_ops.string_format(
inputs=tensors,
template=template,
placeholder=placeholder,
summarize=summarize,
name=format_name)
return gen_logging_ops.print_v2(
formatted_string, output_stream=output_stream_string, name=name, end=end)
# pylint: enable=g-doc-args
@ops.RegisterGradient("Print")
def _PrintGrad(op, *grad):
return list(grad) + [None] * (len(op.inputs) - 1)
def _Collect(val, collections, default_collections):
if collections is None:
collections = default_collections
for key in collections:
ops.add_to_collection(key, val)
@deprecated(
"2016-11-30", "Please switch to tf.summary.histogram. Note that "
"tf.summary.histogram uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in.")
def histogram_summary(tag, values, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
This ops is deprecated. Please switch to tf.summary.histogram.
For an explanation of why this op was deprecated, and information on how to
migrate, look
['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
tag: A `string` `Tensor`. 0-D. Tag to use for the summary value.
values: A real numeric `Tensor`. Any shape. Values to use to build the
histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "HistogramSummary", [tag, values]) as scope:
val = gen_logging_ops.histogram_summary(tag=tag, values=values, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
@deprecated(
"2016-11-30", "Please switch to tf.summary.image. Note that "
"tf.summary.image uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in. Also, the max_images "
"argument was renamed to max_outputs.")
def image_summary(tag, tensor, max_images=3, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with images.
For an explanation of why this op was deprecated, and information on how to
migrate, look
['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The summary has up to `max_images` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
height, width, channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`. `uint8` values are unchanged. The op uses two different
normalization algorithms:
* If the input values are all positive, they are rescaled so the largest one
is 255.
* If any input value is negative, the values are shifted so input value 0.0
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
The `tag` argument is a scalar `Tensor` of type `string`. It is used to
build the `tag` of the summary values:
* If `max_images` is 1, the summary value tag is '*tag*/image'.
* If `max_images` is greater than 1, the summary value tags are
generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
Args:
tag: A scalar `Tensor` of type `string`. Used to build the `tag` of the
summary values.
tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_images: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [ops.GraphKeys.SUMMARIES]
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "ImageSummary", [tag, tensor]) as scope:
val = gen_logging_ops.image_summary(
tag=tag, tensor=tensor, max_images=max_images, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
@deprecated(
"2016-11-30", "Please switch to tf.summary.audio. Note that "
"tf.summary.audio uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in.")
def audio_summary(tag,
tensor,
sample_rate,
max_outputs=3,
collections=None,
name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with audio.
This op is deprecated. Please switch to tf.summary.audio.
For an explanation of why this op was deprecated, and information on how to
migrate, look
['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.
The `tag` argument is a scalar `Tensor` of type `string`. It is used to
build the `tag` of the summary values:
* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
Args:
tag: A scalar `Tensor` of type `string`. Used to build the `tag` of the
summary values.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [ops.GraphKeys.SUMMARIES]
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "AudioSummary", [tag, tensor]) as scope:
sample_rate = ops.convert_to_tensor(
sample_rate, dtype=dtypes.float32, name="sample_rate")
val = gen_logging_ops.audio_summary_v2(
tag=tag,
tensor=tensor,
max_outputs=max_outputs,
sample_rate=sample_rate,
name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
@deprecated("2016-11-30", "Please switch to tf.summary.merge.")
def merge_summary(inputs, collections=None, name=None):
# pylint: disable=line-too-long
"""Merges summaries.
This op is deprecated. Please switch to tf.compat.v1.summary.merge, which has
identical
behavior.
This op creates a
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffer that contains the union of all the values in the input
summaries.
When the Op is run, it reports an `InvalidArgument` error if multiple values
in the summaries to merge use the same tag.
Args:
inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
"""
with ops.name_scope(name, "MergeSummary", inputs):
val = gen_logging_ops.merge_summary(inputs=inputs, name=name)
_Collect(val, collections, [])
return val
@deprecated("2016-11-30", "Please switch to tf.summary.merge_all.")
def merge_all_summaries(key=ops.GraphKeys.SUMMARIES):
"""Merges all summaries collected in the default graph.
This op is deprecated. Please switch to tf.compat.v1.summary.merge_all, which
has
identical behavior.
Args:
key: `GraphKey` used to collect the summaries. Defaults to
`GraphKeys.SUMMARIES`.
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_ops = ops.get_collection(key)
if not summary_ops:
return None
else:
return merge_summary(summary_ops)
def get_summary_op():
"""Returns a single Summary op that would run all summaries.
Either existing one from `SUMMARY_OP` collection or merges all existing
summaries.
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
`Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_op = ops.get_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is not None:
if summary_op:
summary_op = summary_op[0]
else:
summary_op = None
if summary_op is None:
summary_op = merge_all_summaries()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
return summary_op
@deprecated(
"2016-11-30", "Please switch to tf.summary.scalar. Note that "
"tf.summary.scalar uses the node name instead of the tag. "
"This means that TensorFlow will automatically de-duplicate summary "
"names based on the scope they are created in. Also, passing a "
"tensor or list of tags to a scalar summary op is no longer "
"supported.")
def scalar_summary(tags, values, collections=None, name=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with scalar values.
This ops is deprecated. Please switch to tf.summary.scalar.
For an explanation of why this op was deprecated, and information on how to
migrate, look
['here'](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/deprecated/__init__.py)
The input `tags` and `values` must have the same shape. The generated
summary has a summary value for each tag-value pair in `tags` and `values`.
Args:
tags: A `string` `Tensor`. Tags for the summaries.
values: A real numeric Tensor. Values for the summaries.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "ScalarSummary", [tags, values]) as scope:
val = gen_logging_ops.scalar_summary(tags=tags, values=values, name=scope)
_Collect(val, collections, [ops.GraphKeys.SUMMARIES])
return val
ops.NotDifferentiable("HistogramSummary")
ops.NotDifferentiable("ImageSummary")
ops.NotDifferentiable("AudioSummary")
ops.NotDifferentiable("AudioSummaryV2")
ops.NotDifferentiable("MergeSummary")
ops.NotDifferentiable("ScalarSummary")
ops.NotDifferentiable("TensorSummary")
ops.NotDifferentiable("TensorSummaryV2")
ops.NotDifferentiable("Timestamp")
|
PypiClean
|
/scrapy_coco-0.1.1-py3-none-any.whl/coco/pagination/interface.py
|
import logging
from typing import Dict, Optional
from urllib.parse import ParseResult
from urllib.parse import urlparse as parse_url
from coco.exceptions import (NextPageNotFoundError,
PaginationPageThresholdReached)
LOG = logging.getLogger('coco')
class BasePagination:
__slots__ = ('_page', 'page_threshold', 'init_page_num')
def __init__(self, page_threshold: int = 1, init_page_num: int = 1):
self.page_threshold: int = page_threshold
self.init_page_num: int = init_page_num
self.page: int = init_page_num
def get_next_page(self, current_link: str, **kw: Dict) -> str:
raise NotImplementedError('get_next_page method must be implemented')
def reset(self) -> None:
self.page = self.init_page_num
@property
def page(self) -> int:
return self._page
@page.setter
def page(self, value: int):
if value > self.page_threshold:
raise PaginationPageThresholdReached
self._page = value
def __str__(self):
return f'Pagination @page N{self.page}'
class DummyPagination(BasePagination):
__slots__ = tuple()
def get_next_page(self, current_link: str, **kw: Dict) -> str:
raise NextPageNotFoundError
class OnsitePagination(BasePagination):
__slots__ = ('selector', 'prop')
def __init__(self,
selector: str,
prop: str,
page_threshold: int = 1,
init_page_num: int = 1) -> None:
super().__init__(page_threshold, init_page_num)
self.selector: str = selector
self.prop: str = prop
def get_next_page(self, current_link: str, **kw) -> str:
self.page += 1
cur_page_flag: bool = False
# TODO - check context type
context = kw.get('context')
for page_element in context.select(self.selector):
if cur_page_flag:
return page_element.get(self.prop)
if page_element.get(self.prop) == current_link:
cur_page_flag = True
raise NextPageNotFoundError
class QueryStringPagination(BasePagination):
__slots__ = ('param_name', )
def __init__(self,
param_name: str,
page_threshold: int = 1,
init_page_num: int = 1) -> None:
super().__init__(page_threshold, init_page_num)
self.param_name: str = param_name
def get_next_page(self, current_link: str, **kw: Dict) -> str:
parsed: ParseResult = parse_url(current_link)
eqs: str = f'{self.param_name}={self.page}' # existing query string
nqs: str = f'{self.param_name}={self.page + 1}' # new query string
next_page_url: Optional[str] = None
if not parsed.query:
nqs = f'?{nqs}' if current_link.endswith('/') else f'/?{nqs}'
next_page_url = current_link + nqs
elif eqs not in parsed.query:
next_page_url = current_link + f'&{nqs}'
else:
next_page_url = current_link.replace(eqs, nqs)
self.page += 1
return next_page_url
class LinkPagination(BasePagination):
__slots__ = tuple()
def get_next_page(self, current_link: str, **kw: Dict) -> str:
self.page += 1
parsed: ParseResult = parse_url(current_link)
if parsed.path.endswith(f'/{self.page - 1}'):
return current_link.replace(f'/{self.page - 1}', f'/{self.page}')
elif parsed.path.endswith(f'/{self.page - 1}/'):
return current_link.replace(
f'/{self.page - 1}/', f'/{self.page}/')
elif parsed.path.endswith('/'):
return current_link.replace(
parsed.path, f'{parsed.path}{self.page}/')
else:
return current_link.replace(
parsed.path, f'{parsed.path}/{self.page}')
|
PypiClean
|
/jupedsim-0.10.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl/jupedsim_visualizer/main_window.py
|
import math
from pathlib import Path
from jupedsim_visualizer.geometry import Geometry
from jupedsim_visualizer.replay_widget import ReplayWidget
from jupedsim_visualizer.trajectory import Trajectory
from jupedsim_visualizer.view_geometry_widget import ViewGeometryWidget
from PySide6.QtCore import QSettings, QSize
from PySide6.QtStateMachine import QFinalState, QState, QStateMachine
from PySide6.QtWidgets import (
QApplication,
QFileDialog,
QMainWindow,
QMessageBox,
QTabWidget,
)
import jupedsim.py_jupedsim as jps
from jupedsim.recording import Recording
from jupedsim.serialization import parse_wkt
from jupedsim.util import build_jps_geometry
class MainWindow(QMainWindow):
def __init__(self, parent=None) -> None:
QMainWindow.__init__(self, parent)
self.settings = QSettings("jupedsim", "jupedsim_visualizer")
self.setWindowTitle("jupedsim_visualizer")
self._build_central_tabs_widget()
self._build_menu_bar()
self._build_state_machine()
self.setVisible(True)
def _build_central_tabs_widget(self):
tabs = QTabWidget(self)
tabs.setMinimumSize(QSize(640, 480))
tabs.setMovable(True)
tabs.setDocumentMode(True)
tabs.setTabsClosable(True)
tabs.setTabBarAutoHide(True)
tabs.tabCloseRequested.connect(tabs.removeTab)
self.setCentralWidget(tabs)
self.tabs = tabs
def _build_menu_bar(self) -> None:
menu = self.menuBar()
open_menu = menu.addMenu("File")
open_wkt_act = open_menu.addAction("Open wkt file")
open_wkt_act.triggered.connect(self._open_wkt)
open_replay_act = open_menu.addAction("Open replay file")
open_replay_act.triggered.connect(self._open_replay)
def _build_state_machine(self) -> None:
sm = QStateMachine(self)
sm.finished.connect(QApplication.quit)
start = self._build_start_state()
sm.addState(start)
exit = self._build_exit_state()
sm.addState(exit)
# start.addTransition(self.button.clicked, exit)
sm.setInitialState(start)
sm.start()
self.state_machine = sm
def _build_start_state(self) -> QState:
state = QState()
return state
def _build_show_wkt_state(self) -> QState:
state = QState()
return state
def _build_exit_state(self) -> QFinalState:
state = QFinalState()
return state
def _open_wkt(self):
base_path_obj = self.settings.value(
"files/last_wkt_location",
type=str,
defaultValue=Path("~").expanduser(),
)
base_path = Path(str(base_path_obj))
file, _ = QFileDialog.getOpenFileName(
self, caption="Open WKT file", dir=str(base_path)
)
if not file:
return
file = Path(file)
self.settings.setValue("files/last_wkt_location", str(file.parent))
try:
wkt = parse_wkt(Path(file).read_text(encoding="UTF-8"))
navi = jps.experimental.RoutingEngine(build_jps_geometry(wkt))
xmin, ymin, xmax, ymax = wkt.bounds
info_text = f"Dimensions: {math.ceil(xmax - xmin)}m x {math.ceil(ymax - ymin)}m Triangles: {len(navi.mesh())}"
name_text = f"Geometry: {file}"
self.setUpdatesEnabled(False)
geo = Geometry(navi)
tab = ViewGeometryWidget(navi, geo, name_text, info_text)
tab_idx = self.tabs.insertTab(0, tab, file.name)
self.tabs.setCurrentIndex(tab_idx)
self.setUpdatesEnabled(True)
except Exception as e:
QMessageBox.critical(
self,
"Error importing WKT geometry",
f"Error importing WKT geometry:\n{e}",
)
return
def _open_replay(self):
base_path_obj = self.settings.value(
"files/last_replay_location",
type=str,
defaultValue=Path("~").expanduser(),
)
base_path = Path(str(base_path_obj))
file, _ = QFileDialog.getOpenFileName(
self, caption="Open recording", dir=str(base_path)
)
if not file:
return
file = Path(file)
self.settings.setValue("files/last_replay_location", str(file.parent))
try:
rec = Recording(file.as_posix())
self.setUpdatesEnabled(False)
navi = jps.experimental.RoutingEngine(
build_jps_geometry(rec.geometry())
)
geo = Geometry(navi)
trajectory = Trajectory(rec)
tab = ReplayWidget(navi, rec, geo, trajectory)
tab_idx = self.tabs.insertTab(0, tab, file.name)
self.tabs.setCurrentIndex(tab_idx)
self.setUpdatesEnabled(True)
self.update()
except Exception as e:
QMessageBox.critical(
self,
"Error importing simulation recording",
f"Error importing simulation recording:\n{e}",
)
return
|
PypiClean
|
/django_bkendoz-1.0.7-py3-none-any.whl/django_bkendoz/serializers.py
|
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from django.contrib.auth import get_user_model
from .core import get_global_history_model
class GenericSerializer(serializers.ModelSerializer):
class Meta:
model = None
fields = "__all__"
depth = 1
class GenericHistorySerializer(serializers.ModelSerializer):
action = serializers.SerializerMethodField()
user_link = serializers.SerializerMethodField()
date = serializers.SerializerMethodField()
record_link = serializers.SerializerMethodField()
model_list_link = serializers.SerializerMethodField()
record = serializers.SerializerMethodField()
def get_action(self, global_history):
if global_history.action == "ED":
return f"""
<a class="modal-open get-object-changes" href="#" data-url="{global_history.get_absolute_url()}"
data-modal="#modal-changes"><i class="fas fa-pen hist-edit"></i></a>
"""
elif global_history.action == "NE":
return '<i class="fas fa-plus hist-add"></i>'
elif global_history.action == "DE":
return '<i class="fas fa-trash hist-del"></i>'
def get_user_link(self, global_history):
return global_history.user.to_datatable()
def get_date(self, global_history):
return global_history.date.strftime("%Y-%m-%d %H:%M:%S")
def get_record(self, global_history):
return global_history.content_object.history_object.name
def get_record_link(self, global_history):
if not global_history.action == "DE":
return global_history.content_object.history_object.to_datatable()
else:
return global_history.record_name
def get_model_list_link(self, global_history):
return global_history.content_object.history_object.__class__.href_to_datalist()
class Meta:
model = get_global_history_model()
fields = [
"action",
"user_link",
"date",
"record_link",
"model_list_link",
"record",
]
depth = 1
class GenericUserSerializer(serializers.ModelSerializer):
def create(self, validated_data):
user = get_user_model().objects.create_user(**validated_data)
return user
class Meta:
model = get_user_model()
fields = (
"username",
"first_name",
"last_name",
"email",
"password",
"style",
)
validators = [
UniqueTogetherValidator(
queryset=get_user_model().objects.all(), fields=["username", "email"]
)
]
|
PypiClean
|
/gphoto2-2.3.2.tar.gz/gphoto2-2.3.2/src/swig-gp2.5.28/widget.py
|
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
import gphoto2.abilities_list
import gphoto2.camera
import gphoto2.context
import gphoto2.file
import gphoto2.filesys
import gphoto2.list
import gphoto2.port_info_list
import gphoto2.port_log
import gphoto2.result
import gphoto2.version
# Pull in all the attributes from the low-level C/C++ module
if __package__ or "." in __name__:
from ._widget import *
else:
from _widget import *
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
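# Illustrative note (not part of the SWIG-generated bindings): applying
# `_swig_add_metaclass(_SwigNonDynamicMeta)` to a class lets assignments to
# *existing* class attributes pass through, while assigning a brand-new class
# attribute raises AttributeError, e.g.
#
#     @_swig_add_metaclass(_SwigNonDynamicMeta)
#     class Example(object):
#         existing = 1
#
#     Example.existing = 2       # allowed, attribute already defined
#     Example.new_attribute = 3  # raises AttributeError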
|
PypiClean
|
/nipype-1.8.6.tar.gz/nipype-1.8.6/doc/devel/index.rst
|
.. _developers-guide-index:
=================
Developer Guide
=================
:Release: |version|
:Date: |today|
Since nipype is part of the NIPY_ project, we follow the same
conventions documented in the `NIPY Developers Guide
<http://nipy.org/devel>`_. For bleeding-edge version help see `Nightly documentation <http://www.mit.edu/~satra/nipype-nightly/>`_
.. toctree::
:maxdepth: 2
writing_custom_interfaces
gitwash/index
architecture
provenance
software_using_nipype
testing_nipype
.. include:: ../links_names.txt
|
PypiClean
|
/trucks-and-drones-0.0.6.tar.gz/trucks-and-drones-0.0.6/trucks_and_drones/simulation/auto_agent.py
|
import numpy as np
class BaseAutoAgent:
def __init__(self, temp_db):
self.temp_db = temp_db
def find_destination(self):
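# Added descriptive note: if any vehicle is stuck and the current vehicle is a
# truck, return the coordinates of the nearest stuck vehicle; otherwise return
# the coordinates of the nearest customer with remaining items and a waiting
# flag of 0 (provided the current vehicle still carries cargo), falling back
# to the nearest depot when no such customer exists.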
if (np.sum(self.temp_db.status_dict['v_stuck']) != 0
and bool(self.temp_db.constants_dict['v_is_truck'][self.temp_db.cur_v_index])
):
v_index = self.temp_db.nearest_neighbour(self.temp_db.vehicles(
self.temp_db.status_dict['v_coord'],
include=[[self.temp_db.status_dict['v_stuck'], 1]]
)
)
if v_index is None:
return None
return self.temp_db.status_dict['v_coord'][v_index]
else:
if (self.temp_db.base_groups['vehicles'][self.temp_db.cur_v_index].v_items.cur_value() > 0
and 0 in self.temp_db.customers(self.temp_db.status_dict['n_waiting'])[0]
and np.sum(self.temp_db.customers(self.temp_db.status_dict['n_items'])[0]) != 0
):
n_index = self.temp_db.nearest_neighbour(self.temp_db.customers(
self.temp_db.status_dict['n_coord'],
include=[[self.temp_db.status_dict['n_waiting'], 0]],
exclude=[[self.temp_db.status_dict['n_items'], 0]]
)
)
else:
n_index = self.find_depot()
self.temp_db.status_dict['v_to_n'][self.temp_db.cur_v_index] = n_index
if n_index is None:
return None
return self.temp_db.status_dict['n_coord'][n_index]
def find_v_to_unload(self):
if any(self.temp_db.v_transporting_v[self.temp_db.cur_v_index]):
return self.temp_db.v_transporting_v[self.temp_db.cur_v_index][0]
return None
def find_v_to_load(self):
if self.temp_db.constants_dict['v_is_truck'][self.temp_db.cur_v_index]:
return self.temp_db.nearest_neighbour(self.temp_db.vehicles(
self.temp_db.status_dict['v_coord'],
include=[[self.temp_db.constants_dict['v_loadable'], 1], [self.temp_db.status_dict['v_free'], 1]]
)
)
else:
return None
def find_customer(self):
return self.temp_db.nearest_neighbour(self.temp_db.customers(
self.temp_db.status_dict['n_coord'],
exclude=[[self.temp_db.status_dict['n_items'], 0]]
)
)
def find_depot(self):
return self.temp_db.nearest_neighbour(self.temp_db.depots(
self.temp_db.status_dict['n_coord'],
# exclude=[[self.temp_db.status_dict['n_items'], 0]]
)
)
|
PypiClean
|
/nornir_nautobot-2.6.0-py3-none-any.whl/nornir_nautobot/plugins/processors/__init__.py
|
import logging
from nornir.core.inventory import Host
from nornir.core.task import AggregatedResult, MultiResult, Task
LOGGER = logging.getLogger(__name__)
class BaseProcessor:
"""Base Processor for nornir."""
task_name = "'no task defined'"
def task_started(self, task: Task) -> None:
"""Boilerplate Nornir processor for task_started."""
def task_completed(self, task: Task, result: AggregatedResult) -> None:
"""Boilerplate Nornir processor for task_completed."""
def task_instance_started(self, task: Task, host: Host) -> None:
"""Boilerplate Nornir processor for task_instance_started."""
def task_instance_completed(self, task: Task, host: Host, result: MultiResult) -> None:
"""Boilerplate Nornir processor for task_instance_completed."""
def subtask_instance_started(self, task: Task, host: Host) -> None:
"""Boilerplate Nornir processor for subtask_instance_started."""
def subtask_instance_completed(self, task: Task, host: Host, result: MultiResult) -> None:
"""Boilerplate Nornir processor for subtask_instance_completed."""
class BaseLoggingProcessor(BaseProcessor):
"""Base Processor with logging for nornir."""
def task_started(self, task: Task) -> None:
"""Boilerplate Nornir processor for task_started with logging."""
LOGGER.info("%s | Task started", task.name)
def task_completed(self, task: Task, result: AggregatedResult) -> None:
"""Boilerplate Nornir processor for task_completed with logging."""
LOGGER.info("%s | Task task_completed", task.name)
def task_instance_started(self, task: Task, host: Host) -> None:
"""Boilerplate Nornir processor for task_instance_started with logging."""
LOGGER.info("%s | Task instance %s has started", host.name, task.name)
def task_instance_completed(self, task: Task, host: Host, result: MultiResult) -> None:
"""Boilerplate Nornir processor for task_instance_completed with logging."""
LOGGER.info("%s | Task instance %s has completed", host.name, task.name)
def subtask_instance_started(self, task: Task, host: Host) -> None:
"""Boilerplate Nornir processor for subtask_instance_started with logging."""
LOGGER.info("%s | Task instance subtask %s has started.", task.host.name, task.name)
def subtask_instance_completed(self, task: Task, host: Host, result: MultiResult) -> None:
"""Boilerplate Nornir processor for subtask_instance_completed with logging."""
LOGGER.info("%s | Task instance subtask %s has completed.", task.host.name, task.name)
|
PypiClean
|
/FlexGet-3.9.6-py3-none-any.whl/flexget/components/sites/sites/frenchtorrentdb.py
|
import re
from loguru import logger
from flexget import plugin
from flexget.components.sites.urlrewriting import UrlRewritingError
from flexget.event import event
from flexget.utils.soup import get_soup
logger = logger.bind(name='FTDB')
class UrlRewriteFTDB:
"""FTDB RSS url_rewrite"""
def url_rewritable(self, task, entry):
# url = entry['url']
if re.match(r'^http://www\.frenchtorrentdb\.com/[^/]+(?!/)[^/]+&rss=1', entry['url']):
return True
return False
def url_rewrite(self, task, entry):
old_url = entry['url']
page_url = old_url.replace('DOWNLOAD', 'INFOS')
page_url = page_url.replace('&rss=1', '')
new_url = self.parse_download_page(page_url, task.requests)
logger.debug('PAGE URL NEEDED : {}', page_url)
logger.debug('{} OLD is rewritten to NEW {}', old_url, new_url)
entry['url'] = new_url
def parse_download_page(self, page_url, requests):
page = requests.get(page_url)
try:
soup = get_soup(page.text)
except Exception as e:
raise UrlRewritingError(e)
tag_a = soup.find("a", {"class": "dl_link"})
if not tag_a:
if soup.findAll(text="Connexion ?"):
raise UrlRewritingError(
'You are not logged in,\
check if your cookie for\
authentication is up to date'
)
else:
raise UrlRewritingError(
'You have reached your download\
limit per 24hours, so I cannot\
get the torrent'
)
torrent_url = "http://www.frenchtorrentdb.com" + tag_a.get('href') + "&js=1"
logger.debug('TORRENT URL is : {}', torrent_url)
return torrent_url
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteFTDB, 'frenchtorrentdb', interfaces=['urlrewriter'], api_ver=2)
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/permission_grants/item/get_member_objects/get_member_objects_post_request_body.py
|
from __future__ import annotations
from kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
class GetMemberObjectsPostRequestBody(AdditionalDataHolder, Parsable):
def __init__(self,) -> None:
"""
Instantiates a new getMemberObjectsPostRequestBody and sets the default values.
"""
# Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
self._additional_data: Dict[str, Any] = {}
# The securityEnabledOnly property
self._security_enabled_only: Optional[bool] = None
@property
def additional_data(self,) -> Dict[str, Any]:
"""
Gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
Returns: Dict[str, Any]
"""
return self._additional_data
@additional_data.setter
def additional_data(self,value: Dict[str, Any]) -> None:
"""
Sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
Args:
value: Value to set for the AdditionalData property.
"""
self._additional_data = value
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> GetMemberObjectsPostRequestBody:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parseNode: The parse node to use to read the discriminator value and create the object
Returns: GetMemberObjectsPostRequestBody
"""
if parse_node is None:
raise Exception("parse_node cannot be undefined")
return GetMemberObjectsPostRequestBody()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
fields: Dict[str, Callable[[Any], None]] = {
"securityEnabledOnly": lambda n : setattr(self, 'security_enabled_only', n.get_bool_value()),
}
return fields
@property
def security_enabled_only(self,) -> Optional[bool]:
"""
Gets the securityEnabledOnly property value. The securityEnabledOnly property
Returns: Optional[bool]
"""
return self._security_enabled_only
@security_enabled_only.setter
def security_enabled_only(self,value: Optional[bool] = None) -> None:
"""
Sets the securityEnabledOnly property value. The securityEnabledOnly property
Args:
value: Value to set for the security_enabled_only property.
"""
self._security_enabled_only = value
def serialize(self,writer: SerializationWriter) -> None:
"""
Serializes information the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if writer is None:
raise Exception("writer cannot be undefined")
writer.write_bool_value("securityEnabledOnly", self.security_enabled_only)
writer.write_additional_data_value(self.additional_data)
|
PypiClean
|
/uniohomeassistant-0.1.3.tar.gz/uniohomeassistant-0.1.3/homeassistant/components/digitalloggers/switch.py
|
from datetime import timedelta
import logging
import dlipower
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_TIMEOUT,
CONF_USERNAME,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_CYCLETIME = "cycletime"
DEFAULT_NAME = "DINRelay"
DEFAULT_USERNAME = "admin"
DEFAULT_PASSWORD = "admin"
DEFAULT_TIMEOUT = 20
DEFAULT_CYCLETIME = 2
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.All(
vol.Coerce(int), vol.Range(min=1, max=600)
),
vol.Optional(CONF_CYCLETIME, default=DEFAULT_CYCLETIME): vol.All(
vol.Coerce(int), vol.Range(min=1, max=600)
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Find and return DIN III Relay switch."""
host = config[CONF_HOST]
controller_name = config[CONF_NAME]
user = config[CONF_USERNAME]
pswd = config[CONF_PASSWORD]
tout = config[CONF_TIMEOUT]
cycl = config[CONF_CYCLETIME]
power_switch = dlipower.PowerSwitch(
hostname=host, userid=user, password=pswd, timeout=tout, cycletime=cycl
)
if not power_switch.verify():
_LOGGER.error("Could not connect to DIN III Relay")
return False
outlets = []
parent_device = DINRelayDevice(power_switch)
outlets.extend(
DINRelay(controller_name, parent_device, outlet) for outlet in power_switch[0:]
)
add_entities(outlets)
class DINRelay(SwitchEntity):
"""Representation of an individual DIN III relay port."""
def __init__(self, controller_name, parent_device, outlet):
"""Initialize the DIN III Relay switch."""
self._controller_name = controller_name
self._parent_device = parent_device
self._outlet = outlet
self._outlet_number = self._outlet.outlet_number
self._name = self._outlet.description
self._state = self._outlet.state == "ON"
@property
def name(self):
"""Return the display name of this relay."""
return f"{self._controller_name}_{self._name}"
@property
def is_on(self):
"""Return true if relay is on."""
return self._state
def turn_on(self, **kwargs):
"""Instruct the relay to turn on."""
self._outlet.on()
def turn_off(self, **kwargs):
"""Instruct the relay to turn off."""
self._outlet.off()
def update(self):
"""Trigger update for all switches on the parent device."""
self._parent_device.update()
outlet_status = self._parent_device.get_outlet_status(self._outlet_number)
self._name = outlet_status[1]
self._state = outlet_status[2] == "ON"
class DINRelayDevice:
"""Device representation for per device throttling."""
def __init__(self, power_switch):
"""Initialize the DINRelay device."""
self._power_switch = power_switch
self._statuslist = None
def get_outlet_status(self, outlet_number):
"""Get status of outlet from cached status list."""
return self._statuslist[outlet_number - 1]
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Fetch new state data for this device."""
self._statuslist = self._power_switch.statuslist()
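# Hedged configuration sketch (not part of the original module): an example
# configuration.yaml entry matching PLATFORM_SCHEMA above; host and
# credentials are placeholders.
#
# switch:
#   - platform: digitalloggers
#     host: 192.168.1.58
#     name: DINRelay
#     username: admin
#     password: admin
#     timeout: 20
#     cycletime: 2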
|
PypiClean
|
/anticp2-1.1.tar.gz/anticp2-1.1/README.md
|
# Anticp2: Prediction, design and scanning of anticancer peptides
AntiCP 2.0 is an updated version of AntiCP, developed to predict and design anticancer peptides with high accuracy. This study utilizes the largest possible dataset of anticancer and non-anticancer peptides. The main dataset consists of 861 experimentally validated anticancer peptides and 861 non-anticancer or validated antimicrobial peptides. The alternate dataset comprises 970 anticancer peptides and 970 non-anticancer peptides (randomly picked from Swiss-Prot).
# Reference
Agrawal P., Bhagat D., Mahalwal M., Sharma N., Raghava G.P.S. (2020), AntiCP 2.0: an updated model for predicting anticancer peptides, <a href="https://doi.org/10.1093/bib/bbaa153">Briefings in Bioinformatics, bbaa153</a>
# Web Server
https://webs.iiitd.edu.in/raghava/anticp2/
# Installation
```
pip install anticp2
```
# Introduction
AntiCP2 is developed for predicting, designing and scanning anticancer peptides. More information on AntiCP2 is available from its web server http://webs.iiitd.edu.in/raghava/anticp2/ . This page provides information about the standalone version of AntiCP2. Please read/cite the following paper for complete information, including the algorithm behind AntiCP2.
Agrawal P., Bhagat D., Mahalwal M., Sharma N., and Raghava GPS (2020) AntiCP 2.0: an updated model for predicting anticancer peptides. Briefing in Bioinformatics doi: 10.1093/bib/bbaa153
**Models:** In this program, two models have been incorporated for predicting anticancer peptides. Model1 is trained on anticancer and antimicrobial peptides; it is the default model. Model2 is trained on anticancer and non-anticancer (or random) peptides.
**Modules/Jobs:** This program implements three modules (job types): i) Predict: for predicting anticancer peptides, ii) Design: for generating all possible peptides and computing the anticancer potential (score) of each peptide, iii) Scan: for creating all possible overlapping peptides of a given length (window) and computing the anticancer potential (score) of these overlapping peptides.
**Minimum USAGE:** The minimum usage is "anticp2 -i peptide.fa", where peptide.fa is an input FASTA file. This will predict the anticancer potential of the sequences in the FASTA file, use default values for all other parameters, and save the output to "outfile.csv" in CSV (comma separated values) format.
**Full Usage:** The following is the complete list of options; you may also get them by running "anticp2 -h":
```
anticp [-h] -i INPUT [-o OUTPUT] [-j {1,2,3}] [-t THRESHOLD]
[-m {1,2}]
[-w {5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29}]
[-d {1,2}]
```
**optional arguments:**
```
-h, --help show this help message and exit
-i INPUT, --input INPUT
Input: protein or peptide sequence in FASTA format or single sequence per line in single letter code
-o OUTPUT, --output OUTPUT
Output: File for saving results by default outfile.csv
-j {1,2,3}, --job {1,2,3}
Job Type: 1:predict, 2:design and 3:scan, by default 1
-t THRESHOLD, --threshold THRESHOLD
Threshold: Value between 0 to 1 by default 0.5
-m {1,2}, --model {1,2}
Model: 1: ACP/AMP, 2: ACP/non-ACP, by default 1
-w {5,6,7,..,30}, --winleng
Window Length: 5 to 30 (scan mode only), by default 10
-d {1,2}, --display {1,2}
Display: 1:Anticancer peptide, 2: All peptides, by default 1
```
**Input File:** Users may provide input in two formats: i) FASTA format (standard) and ii) simple format. In the simple format, the file should have one peptide sequence per line in single-letter code (e.g., peptide.seq). Note that for the predict and design modules (jobs) the peptide length should be up to 50 amino acids; if a sequence is longer, the program takes its first 50 residues. For the scan module, the minimum length of the protein/peptide sequence should be greater than or equal to the window length (pattern), see peptide.fa. The program ignores peptides shorter than 5 residues (e.g., protein.fa).
**Output File:** The program saves results in CSV format; if the user does not provide an output file name, results are stored in outfile.csv.
**Threshold:** The user should provide a threshold between 0 and 1; note that the score is proportional to the anticancer potential of the peptide.
# Address for contact
In case of any query please contact
```
Prof. G. P. S. Raghava, Head Department of Computational Biology,
Indraprastha Institute of Information Technology (IIIT),
Okhla Phase III, New Delhi 110020 ; Phone:+91-11-26907444;
Email: [email protected] Web: http://webs.iiitd.edu.in/raghava/
```
|
PypiClean
|
/feature_selection_fdr-0.1.tar.gz/feature_selection_fdr-0.1/feature_selection_fdr/knockoff_features_construction.py
|
import numpy as np
import pandas as pd
import utils
from sklearn.preprocessing import scale
import warnings
# ["ASDP", "selfblocks", 50, 50]
class Knockoff(object):
def __init__(self, X, selection_method, optimization, SDP_use=False, cholesky=False, **kwargs):
"""
Creates knockoff features only; It does not select variables.
"""
self.n, self.p = X.shape
if self.n < self.p:
raise ValueError("Number of datapoints should be at least as large as number of inputs for fixed-X knockoff. Use model-X instead.")
self.kwargs = kwargs
if not isinstance(optimization, (list, tuple)):
optimization = [optimization.lower()]
if len(optimization) == 1:
self.optimization = optimization[0].lower()
if self.optimization.lower() == 'asdp':
self.ASDPapprox_method, self.block_size, self.min_p_ASDP = "selfblocks", 50, 50
# To approximate covariance matrix, approximation method needs to be specified along with block size and minimum size of data for ASDP.
elif len(optimization) == 4:
self.optimization, self.ASDPapprox_method, self.block_size, self.min_p_ASDP = optimization[0].lower(), optimization[1].lower(), optimization[2], optimization[3]
else:
raise ValueError("The optimization is either a string, or a list of size either 1 or 4.\n"+\
" Specify the ASDPapprox_method, block_size, and min_p_ASDP.")
self.selection_method = selection_method.lower()
        self.cholesky = cholesky  # Method for taking the matrix square root: Cholesky decomposition if cholesky=True, SVD if False.
self.identity_p = np.identity(self.p)
self.SDP_use = SDP_use
# ASDP is used only for model-X version of knockoff method.
# if self.optimization == "asdp" and self.selection_method == "knockoff-fx":
# raise AttributeError("ASDP is not available for fixed-X; Try other optimization errors.")
self.augmented = False
if self.selection_method == "knockoff-fx":
if self.n < 2 * self.p: # Let's augment zero matrix to bottom of feature matrix.
self.augmented = True
X = np.vstack((X, np.zeros((2 * self.p - self.n, self.p))))
self.n = 2 * self.p
                warnings.warn("Number of observations is less than twice the number of inputs (p < n < 2p). Data augmentation is used!")
            # Scaling the inputs: first scale them to have unit norm and then multiply them by the square root of the
            # size of the data to boost the size of the parameters.
self.X = scale(X)
            self.U, self.d, self.Vt = np.linalg.svd(self.X, full_matrices=False)
            # NOTE: optim() and fixedX_knockoff_features() reference self.Sigma and self.Sigma_inv, which the
            # original code only defines for the model-X branch. Defining them here from the Gram matrix of the
            # scaled inputs is an assumed fix for that missing attribute.
            self.Sigma = np.dot(self.X.T, self.X)
            self.Sigma_inv = np.linalg.solve(self.Sigma, self.identity_p)
elif self.selection_method == "knockoff-mx":
self.X = X
self.Sigma = np.cov(self.X, rowvar=False)
self.Sigma_inv = np.linalg.solve(self.Sigma, self.identity_p)
def knockoff_features(self):
if self.selection_method == "knockoff-fx":
self.X_tilde = self.fixedX_knockoff_features()
elif self.selection_method == "knockoff-mx":
self.X_tilde = self.modelX_knockoff_features()
if "t" in self.kwargs:
if self.augmented:
self.t = self.augment(self.t)
return(self)
def fixedX_knockoff_features(self):
"""
This method/function creates fixed knockoff features (vs. randomized).
"""
self.min_eigenvalue = np.min(self.d) ** 2
self.C2 , self.s = self.optim()
# taking matrix-square root of the matrix C2, using svd of C2.
C = self.matrix_sqrt(self.C2)
        # Add zero columns to U and apply Gram-Schmidt to find a unitary matrix orthogonal to X (i.e., orthogonal to the U matrix in the SVD of X).
U_extend = np.hstack((self.U, np.zeros((self.n, self.p))))
Uorthog = np.linalg.qr(U_extend)[0][:, (self.p):(2 * self.p)]# The right half of the Q matrix in the QR decomposition of U_.
out = self.X - np.dot(self.X, np.dot(self.Sigma_inv, self.s)) + np.dot(Uorthog, C)
return(out)
def modelX_knockoff_features(self):
"""
        This method/function evaluates randomized knockoff features (mostly when inputs are Gaussian).
"""
self.min_eigenvalue = np.min(np.linalg.eigvals(self.Sigma))
self.C2 , self.s = self.optim()
scaled_X = scale(self.X, with_std=False)
mu = self.X - np.dot(scaled_X, np.dot(self.Sigma_inv, self.s))
C = self.matrix_sqrt(self.C2)
standard_normal = np.random.normal(0., 1., size=(self.n, self.p))
out = mu + np.dot(standard_normal, C)
return(out)
def optim(self):
"""
Three methods of optimization to find s_j: https://arxiv.org/pdf/1404.5609.pdf
"""
# Using semi-definite programming (SDP):
if self.optimization == "sdp":# http://cvxopt.org/userguide/coneprog.html#s-sdpsolver
if (self.p < 200) or (self.p >= 200 and self.SDP_use == True):
C2, s = self.SDPoptim()
else:
print("WARNING: Due to computational complexity of 'SDP', the 'ASDP' optimization (blocksize = 50, approx_method='selfblocks') is used because p>= 200.")
print("If SDP is of interest anyway, turn of the 'SDP_use' flag.")
C2, s = self.ASDPoptim(50, approx_method="selfblocks")
elif self.optimization == "asdp":# https://statweb.stanford.edu/~candes/papers/MF_knockoffs.pdf
print("WARNING: When 'ASDP' optimization method is used, it's recommended to keep more correlated inputs next to each other.")
if self.p >= self.min_p_ASDP:
C2, s = self.ASDPoptim(self.block_size, approx_method=self.ASDPapprox_method)# selfblocks, cluster, eigen
elif self.p < self.min_p_ASDP:
print("WARNING: Number of features is small relative to min_p_ASDP={}; SDP is used instead of ASDP.".format(self.min_p_ASDP))
C2, s = self.SDPoptim()
# Equating all values of s_j to be 1-eigenvalues of Sigma. If they're larger than 1, we truncate them to 1.
# This is a fast and relatively good method (in terms of inducing a powerful knockoff method.)
elif self.optimization == "samplecorr":# s_j = 1.
s = np.diag(np.abs(.5 - np.abs(np.corrcoef(self.X, rowvar=False)-self.identity_p)).mean(0))
s[s > 1.] = 1.
s[s < 0.] = 0.
C2 = 2. * s - self.Sigma_inv
        # Set all s_j to twice the minimum eigenvalue of Sigma; if that value exceeds 1, truncate it to 1.
elif self.optimization == "min_eigenvalue":# s_j = min(1, 2*min(eigenvalue(Sigma)))
value = min(1., 2. * self.min_eigenvalue)
s = value * self.identity_p if value != 1. else self.identity_p
C2 = 2. * s - (self.min_eigenvalue**2) * self.Sigma_inv
C2, s = self.make_pos_semi_def(C2, s)
return(C2, s)
def make_pos_semi_def(self, C2, s):
"""
This can be used to correct for round off errors in calculation of SDP solutions; or can be directly used
as an optimization method by incrementally decreasing values of s_j until C2 becomes positive semi-definite.
"""
eps = min(.05, 2 * self.min_eigenvalue)
old_s = self.identity_p
while not self.is_pos_semi_def(C2):
s = s - eps * self.identity_p
s_ = np.diagonal(s).flatten()
s_[np.argwhere(s_ < eps)] = eps
s = np.diag(s_)
C2 = 2. * s - np.dot(s, np.dot(self.Sigma_inv, s))
if np.allclose(old_s, s_):
break
old_s = s_
s_min = np.min(np.diagonal(s))
if np.allclose(s_min, 0.) or s_min < 0.:
raise ValueError("the method has no power because C2 is not positive semi-definite.")
if np.max(np.diagonal(s)) < .1:
C2, s = self.power_optim(C2, s)
return(C2, s)
def is_pos_semi_def(self, x):
return(np.all(np.linalg.eigvals(x) >= 0.))
def power_optim(self, C2, s):
"""
        To have more power, check whether the matrix C2 remains positive semi-definite after adding some epsilon to each s_j.
"""
eps = .05
s_ = s
C2_ = C2
while self.is_pos_semi_def(C2_) and np.max(np.diagonal(s_)) <= 1.:
s = s_
C2 = 2. * s - np.dot(s, np.dot(self.Sigma_inv, s))
s_ = s_ + eps * self.identity_p
C2_ = 2. * s_ - np.dot(s_, np.dot(self.Sigma_inv, s_))
return(C2, s)
def matrix_sqrt(self, C2):
if self.cholesky:
C = np.linalg.cholesky(C2)
elif not self.cholesky:
C2_V, C2_d, C2_Vt = np.linalg.svd(C2, full_matrices=False)
C2_d[C2_d < 0.] = 0.
C = np.dot(np.diag(np.sqrt(C2_d)), C2_Vt)
return(C)
def SDPoptim(self):
"""
        Defining C2 of the knockoff paper using the SDP optimization method:
"""
sol = utils.sdp(self.Sigma)
s = np.diag(np.array(sol).flatten())
C2 = 2. * s - np.dot(s, np.dot(self.Sigma_inv, s))
return(C2, s)
def ASDPoptim(self, block_size, approx_method):
"""
        Defining C2 of the knockoff paper using the approximate SDP optimization method:
https://statweb.stanford.edu/~candes/papers/MF_knockoffs.pdf
"""
sol = utils.asdp(self.Sigma, block_size, approx_method)
s = np.diag(np.array(sol).flatten())
C2 = 2. * s - np.dot(s, np.dot(self.Sigma_inv, s))
return(C2, s)
def augment(self, t):
"""
        This function augments the response variable when we want to create fixed-X knockoff features anyway.
        The assumption is that 2p > n, so we augment the response with 2p - n observations.
"""
n_before = t.shape[0]
U = self.U[:n_before, :]
Q = np.identity(n_before) - np.dot(U, U.T)#np.dot(self.X, np.linalg.solve(np.dot(self.X.T, self.X), self.X.T))
sigma_hat = np.sqrt(np.dot(t.T, np.dot(Q, t)))[0, 0]
y_hat = np.random.normal(0., sigma_hat, size=(2 * self.p - n_before, 1))
y = np.vstack((t, y_hat))
return(y)
#Check if knockoff-fx
# print(np.allclose(np.dot(out.T, self.X), self.Sigma-self.s))
# # print(np.isclose(np.dot(out.T, self.X), self.Sigma-self.s))
# print(np.sum(np.dot(out.T, self.X) - (self.Sigma-self.s)))
# print(np.allclose(np.dot(out.T, out), self.Sigma))
# # print(np.isclose(np.dot(out.T, out), self.Sigma))
# print(np.sum(np.dot(out.T, out) - (self.Sigma)))
# exit()
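# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It builds model-X
# knockoffs with the cheap "min_eigenvalue" heuristic, which avoids the SDP /
# ASDP solvers provided by the local `utils` module. The shapes and the seed
# below are illustrative assumptions only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.normal(size=(500, 20))  # n=500 observations, p=20 features
    ko = Knockoff(X_demo, selection_method="knockoff-mx", optimization="min_eigenvalue")
    X_tilde = ko.knockoff_features().X_tilde  # knockoff copy of the design matrix
    print(X_tilde.shape)  # expected: (500, 20)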
|
PypiClean
|
/pysit-0.5b3.zip/pysit-0.5b3/docs/exercises/part_3.rst
|
*****************************************
Part 3: The Linear Problem and Validation
*****************************************
In this exercise you will solve the *linear* forward modeling equations and
verify that the adjoint conditions hold for your code. The definition of the
adjoint of a linear operator states that
.. math::
\left< F\delta m, d \right> _{\mathcal{D}} = \left< \delta m, F^{*}d
\right> _{\mathcal{M}},
where :math:`\left< \cdot, \cdot \right> _{\mathcal{D}}` and :math:`\left<
\cdot, \cdot \right> _{\mathcal{M}}` are inner products in the data and model
spaces, respectively. One test to see if your migration operator :math:`F^*`
is working correctly is to test if this relationship holds to high precision
for any pair of :math:`d` and :math:`\delta m`.
Linear Forward Operator
=======================
To implement the adjoint test, you will need to solve the linear modeling
equations, which are derived in the `notes
<http://math.mit.edu/icg/resources/notes325.pdf>`_,
.. math::
(\frac{1}{c_0(x)^2}\partial_{tt}-\partial_{xx})u_1(x,t) & = -\delta m(x) \partial_{tt}u_0(x,t), \\
(\frac{1}{c_0(x)}\partial_t-\partial_x)u_1(0,t) & = 0, \\
(\frac{1}{c_0(x)}\partial_t+\partial_x)u_1(1,t) & = 0, \\
u_1(x,t) & = 0 \quad\text{for}\quad t \le 0,
where :math:`u_1` is the Born scattered field and equivalently :math:`F\delta
m = u_1`.
.. topic:: Problem 3.1
Write a function ``linear_sources(dm, u0s, config)`` that takes an
arbitrary model perturbation ``dm`` and a time sequence of incident
wavefields ``u0s`` and generates the linear source wavefields (the
right-hand-sides of the linear modeling equations). Functions you have
previously written should be useful.
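A minimal sketch of one way to build these sources is shown below; it assumes
``u0s`` is an array of incident wavefields with shape ``(nt, nx)`` and that
``config['dt']`` holds the time step (both are assumptions about your data
layout, not requirements of the exercise).

.. code:: python

    import numpy as np

    def linear_sources(dm, u0s, config):
        # Right-hand sides -dm(x) * d^2 u0/dt^2, one source wavefield per time step.
        dt = config['dt']
        u0s = np.asarray(u0s)
        u0_tt = np.zeros_like(u0s)
        u0_tt[1:-1] = (u0s[2:] - 2.0 * u0s[1:-1] + u0s[:-2]) / dt**2
        return [-dm * u for u in u0_tt]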
.. topic:: Problem 3.2
Use your ``leap_frog`` function to solve for the linear forward wavefield
:math:`u_1` due to a perturbation :math:`\delta m`. Use this code to write
a function ``linear_forward_operator(C0, dm, config)`` which returns a
tuple containing the wavefields and the sampled data.
Adjoint Validation
==================
When sampling is accounted for, the adjoint condition requires that,
.. math::
\left< \mathbf{S}F\delta m, d \right> _{\mathcal{D}} = \left< \delta m,
F^{*}\mathbf{S}^*d \right> _{\mathcal{M}},
hold to high precision.
.. topic:: Problem 3.3
Verify that the adjoint condition is satisfied by your implementation of
the linear forward modeling and migration operators. Be careful to take
into account the differences in the model and data inner product. Write a
function ``adjoint_condition(C0, config)`` that implements this test. How
accurate is the relationship? It should be accurate to machine precision
for random values of the data :math:`d` and the model perturbation
:math:`\delta m`. Be sure that you define :math:`\delta m(0) = \delta m(1)
= 0`, as it is nonphysical to have sources on an absorbing boundary.
Consider modifying your ``construct_matrices`` function to accept a
``config`` key ``'bc'``, which allows you to toggle between absorbing and
homogeneous Dirichlet boundaries. This result should still hold.
.. code:: python
print "Absorbing BC"
adjoint_condition(C0, config)
print "Dirichlet BC"
config['bc'] = 'dirichlet'
adjoint_condition(C0, config)
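A minimal sketch of such a test is shown below. The names
``linear_forward_operator`` and ``migrate`` stand in for your own linear
forward and migration operators (they are passed in only to keep the sketch
self-contained), and the keys ``nx``, ``nt``, ``dt`` and ``dx`` in ``config``,
as well as the simple ``dt``/``dx`` weighting of the discrete inner products,
are assumptions about your implementation rather than requirements.

.. code:: python

    import numpy as np

    def adjoint_condition(C0, config, forward, migrate):
        """Compare <S F dm, d>_D with <dm, F* S* d>_M for random dm and d."""
        nx, nt = config['nx'], config['nt']
        dm = np.random.randn(nx)
        dm[0] = dm[-1] = 0.0                       # no sources on the boundary
        d = np.random.randn(nt)
        _, F_dm = forward(C0, dm, config)          # S F dm: sampled linear data
        Fstar_d = migrate(C0, d, config)           # F* S* d: migrated image
        lhs = config['dt'] * np.dot(F_dm, d)       # data-space inner product
        rhs = config['dx'] * np.dot(dm, Fstar_d)   # model-space inner product
        return lhs, rhs, abs(lhs - rhs) / abs(lhs)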
Bonus Problems
==============
**Bonus Problem 3.4:** It is computationally intractable to compute the
linear forward operator :math:`F` directly. Why? If you wanted to
explicitly compute this operator, how would you do it?
|
PypiClean
|
/aws_service_catalog_puppet-0.242.0.tar.gz/aws_service_catalog_puppet-0.242.0/servicecatalog_puppet/workflow/workspaces/terminate_workspace_task.py
|
import io
import zipfile
import luigi
from servicecatalog_puppet import constants, serialisation_utils
from servicecatalog_puppet.workflow.dependencies import tasks
class TerminateWorkspaceTask(tasks.TaskWithParameters):
workspace_name = luigi.Parameter()
region = luigi.Parameter()
account_id = luigi.Parameter()
bucket = luigi.Parameter()
key = luigi.Parameter()
version_id = luigi.Parameter()
ssm_param_inputs = luigi.ListParameter(default=[], significant=False)
launch_parameters = luigi.DictParameter(default={}, significant=False)
manifest_parameters = luigi.DictParameter(default={}, significant=False)
account_parameters = luigi.DictParameter(default={}, significant=False)
retry_count = luigi.IntParameter(default=1, significant=False)
worker_timeout = luigi.IntParameter(default=0, significant=False)
ssm_param_outputs = luigi.ListParameter(default=[], significant=False)
requested_priority = luigi.IntParameter(significant=False, default=0)
execution = luigi.Parameter()
manifest_file_path = luigi.Parameter()
section_name = constants.WORKSPACES
cachable_level = constants.CACHE_LEVEL_RUN
@property
def item_name(self):
return self.workspace_name
def params_for_results_display(self):
return {
"puppet_account_id": self.puppet_account_id,
"workspace_name": self.workspace_name,
"region": self.region,
"account_id": self.account_id,
}
def run(self):
with self.hub_client("s3") as s3:
options = (
zipfile.ZipFile(
io.BytesIO(
s3.get_object(Bucket=self.bucket, Key=self.key)
.get("Body")
.read()
)
)
.open(f"options.json", "r")
.read()
)
options = serialisation_utils.json_loads(options)
zip_file_path = f"s3://{self.bucket}/{self.key}"
state_file_path = f"s3://sc-puppet-state-{self.account_id}/workspace/{self.workspace_name}/{self.account_id}/{self.region}.zip"
with self.spoke_client("codebuild") as codebuild:
parameters_to_use = [
dict(name="TARGET_ACCOUNT", value=self.account_id, type="PLAINTEXT",),
dict(name="STATE_FILE", value=state_file_path, type="PLAINTEXT",),
dict(name="ZIP", value=zip_file_path, type="PLAINTEXT",),
]
for parameter_name, parameter_value in self.get_parameter_values().items():
parameters_to_use.append(
dict(
name=f"TF_VAR_{parameter_name}",
value=f"{parameter_value}",
type="PLAINTEXT",
),
)
parameters_to_use.append(
dict(
name="TERRAFORM_VERSION",
value=options.get("Terraform", {}).get(
"Version", constants.DEFAULT_TERRAFORM_VERSION_VALUE
),
type="PLAINTEXT",
),
)
codebuild.start_build_and_wait_for_completion(
projectName=constants.TERMINATE_TERRAFORM_PROJECT_NAME,
environmentVariablesOverride=parameters_to_use,
)
self.write_empty_output()
|
PypiClean
|
/Products.Marshall-2.4.1.tar.gz/Products.Marshall-2.4.1/Products/Marshall/doc/README.txt
|
========
Marshall
========
Installation
------------
Installing is as simple as clicking a button.
However, by default only the ``marshaller_tool`` is
installed. After that, you need to add some ``marshaller predicates``
yourself. You can do that through the ZMI. The interface should be
verbose enough to tell you everything you need to know to
set up a simple configuration.
Let's go through a step-by-step example of how to set up the ``Marshaller
Registry`` programmatically.
If you follow the steps listed here, you will have a working setup
that can handle uploading either an ``ATXML`` file or some other file
containing whatever you like.
Assuming you already installed the ``Marshall`` and the
``ATContentTypes`` products using the quick-installer tool, the next step
is to add a couple of marshaller predicates.
Our setup will consist of two predicates: one for handling ``ATXML``
files, and another dummy predicate to be used as a ``fallback``, i.e.,
if it's not ATXML, use the dummy predicate.
Add a predicate of the ``xmlns_attr`` kind. This kind of predicate is
used to check for the existence of a certain attribute or element in an
XML file. If the predicate matches, we will map it to the ``atxml``
marshaller (component_name).
>>> portal = layer['portal']
>>> from plone.testing import z2
>>> from plone.app.testing import SITE_OWNER_NAME
>>> z2.login(layer['app']['acl_users'], SITE_OWNER_NAME)
>>> from Products.Marshall import registry
>>> _ = registry.manage_addRegistry(portal)
>>> from Products.Marshall.predicates import add_predicate
>>> from Products.Marshall.config import TOOL_ID as marshall_tool_id
>>> from Products.Marshall.config import AT_NS, CMF_NS
>>> from Products.CMFCore.utils import getToolByName
>>> tool = getToolByName(portal, marshall_tool_id)
>>> p = add_predicate(tool, id='atxml',
... title='ATXML Predicate',
... predicate='xmlns_attr',
... expression='',
... component_name='atxml')
Then edit the predicate so that it matches on the existence of an
element named ``metadata`` using the ``AT_NS`` namespace.
>>> p.edit(element_ns=AT_NS, element_name='metadata', value=None)
Add a default predicate, that just maps to the ``primary_field``
marshaller (which just stuffs the content of the uploaded file into
the object's primary field).
>>> p = add_predicate(tool, id='default',
... title='Default Marshaller',
... predicate='default',
... expression='',
... component_name='primary_field')
The next step is making your Archetypes-based schema aware of the
Marshaller Registry, by making it use the ``ControlledMarshaller``
implementation.
For our example, we will use the ``ATDocument`` class from the
``ATContentTypes`` product.
>>> from Products.ATContentTypes.atct import ATDocument
>>> from Products.Marshall import ControlledMarshaller
Save the current marshaller implementation, and register
``ControlledMarshaller`` in its place.
>>> old_marshall = ATDocument.schema.getLayerImpl('marshall')
>>> ATDocument.schema.registerLayer('marshall',
... ControlledMarshaller())
At this point, our article should be able to use the Marshaller
Registry to decide what Marshaller to use at runtime.
>>> from Products.Archetypes.tests.utils import makeContent
>>> article = makeContent(portal, 'Document', 'article')
>>> article.getId()
'article'
>>> article.Title()
''
>>> article.setTitle('Example Article')
>>> article.Title()
'Example Article'
>>> article.getPortalTypeName()
'Document'
>>> article.getText()
''
Upload a very simple ATXML file and make sure it used the ATXML
Marshaller by checking that the Title got changed and the body is
still empty. Note we also support CDATA sections, so we'll stick some
stuff into the 'blurb' field using CDATA.
>>> xml_input = """
... <?xml version="1.0" ?>
... <metadata xmlns="http://plone.org/ns/archetypes/"
... xmlns:dc="http://purl.org/dc/elements/1.1/"
... xmlns:xmp="adobe:ns:meta">
... <dc:title>
... Some Title
... </dc:title>
... <xmp:CreateDate>
... 2004-01-01T00:02:04Z
... </xmp:CreateDate>
... <field id="expirationDate">
... 2004-09-09T09:09:08Z
... </field>
... <field id="text">
... Here is some Text
... </field>
... </metadata>"""
>>> from Testing.ZopeTestCase.zopedoctest.functional import http
>>> from Testing.ZopeTestCase.sandbox import AppZapper
>>> from plone.app.testing import SITE_OWNER_NAME, SITE_OWNER_PASSWORD
>>> AppZapper().set(layer['app'])
>>> print http(r"""
... PUT /plone/article HTTP/1.1
... Content-Type: text/xml
... Authorization: Basic %s:%s
... %s""" % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD, xml_input), handle_errors=False)
HTTP/1.1 204 No Content...
>>> article.Title()
'Some Title'
>>> article.getText()
'<p>Here is some Text</p>'
>>> article.created().ISO8601()
'2004-01-01T00:02:04+00:00'
>>> article.expires().ISO8601()
'2004-09-09T09:09:08+00:00'
>>> article.EffectiveDate()
'None'
Upload a text file (in this case, 'text/x-rst') and make sure the body
field was updated with the uploaded file contents.
>>> rst_input = """
... Title
... =====
...
... Some Text
... """
>>> print http(r"""
... PUT /plone/article HTTP/1.1
... Content-Type: text/x-rst
... Authorization: Basic %s:%s
... %s""" % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD, rst_input), handle_errors=False)
HTTP/1.1 204 No Content...
>>> article.Title()
'Some Title'
Get the ``raw`` body value. Using getBody() would return the rendered HTML.
>>> print article.getField('text').getRaw(article)
Title
=====
<BLANKLINE>
Some Text
<BLANKLINE>
Now, just restore the previous marshaller, so as to leave everything in
the same state it was found in:
>>> AppZapper().clear()
>>> ATDocument.schema.registerLayer('marshall',
... old_marshall)
|
PypiClean
|
/pyjx-html5-0.1.0.tar.gz/pyjx-html5-0.1.0/pyjswidgets/pyjamas/ui/Panel.py
|
from pyjamas import Factory
from pyjamas import DOM
from pyjamas.ui.Widget import Widget
class PanelBase(object):
def clear(self):
""" use this method, due to list changing as it's being iterated.
also, it's possible to use this method even
"""
children = []
for child in self.__iter__():
children.append(child)
for child in children:
self.remove(child)
def doAttachChildren(self):
for child in self:
child.onAttach()
def doDetachChildren(self):
for child in self:
child.onDetach()
def getWidgetCount(self):
return len(self.getChildren())
def getWidget(self, index):
return self.getChildren()[index]
def getIndexedChild(self, index):
return self.getWidget(index)
def addIndexedItem(self, index, child):
self.add(child)
def getWidgetIndex(self, child):
return self.getChildren().index(child)
def getChildren(self):
return self.children # assumes self.children: override if needed.
def setWidget(self, index, widget):
""" Insert (or optionally replace) the widget at the given index
with a new one
"""
existing = self.getWidget(index)
if existing is not None:
self.remove(existing)
self.insert(widget, index)
def append(self, widget):
return self.add(widget)
def __setitem__(self, index, widget):
return self.setWidget(index, widget)
def __getitem__(self, index):
return self.getWidget(index)
def __len__(self):
return len(self.getChildren())
def __nonzero__(self):
return self is not None
def __iter__(self):
return self.getChildren().__iter__()
class Panel(PanelBase, Widget):
def __init__(self, **kwargs):
self.children = []
PanelBase.__init__(self)
Widget.__init__(self, **kwargs)
def disown(self, widget):
if widget.getParent() is not self:
raise Exception("widget %s is not a child of this panel %s" % \
(str(widget), str(self)))
element = widget.getElement()
widget.setParent(None)
parentElement = DOM.getParent(element)
if parentElement is not None:
DOM.removeChild(parentElement, element)
def adopt(self, widget, container):
if container is not None:
widget.removeFromParent()
DOM.appendChild(container, widget.getElement())
widget.setParent(self)
Factory.registerClass('pyjamas.ui.Panel', 'Panel', Panel)
|
PypiClean
|
/actingweb-2.6.5.tar.gz/actingweb-2.6.5/LICENSE.rst
|
License
=======
Copyright -2016 Cisco Systems Inc
Copyright 2017, 2018 Greger Teigre Wedel
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
PypiClean
|
/msgraph-sdk-1.0.0a3.tar.gz/msgraph-sdk-1.0.0a3/msgraph/generated/groups/item/calendar/calendar_view/item/instances/item/attachments/item/attachment_item_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, Union
from ..........models import attachment
from ..........models.o_data_errors import o_data_error
class AttachmentItemRequestBuilder():
"""
Provides operations to manage the attachments property of the microsoft.graph.event entity.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new AttachmentItemRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/groups/{group%2Did}/calendar/calendarView/{event%2Did}/instances/{event%2Did1}/attachments/{attachment%2Did}{?%24select,%24expand}"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
def create_delete_request_information(self,request_configuration: Optional[AttachmentItemRequestBuilderDeleteRequestConfiguration] = None) -> RequestInformation:
"""
Delete navigation property attachments for groups
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.DELETE
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.add_request_options(request_configuration.options)
return request_info
def create_get_request_information(self,request_configuration: Optional[AttachmentItemRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
"""
The collection of FileAttachment, ItemAttachment, and referenceAttachment attachments for the event. Navigation property. Read-only. Nullable.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.GET
request_info.headers["Accept"] = "application/json"
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
request_info.add_request_options(request_configuration.options)
return request_info
async def delete(self,request_configuration: Optional[AttachmentItemRequestBuilderDeleteRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> None:
"""
Delete navigation property attachments for groups
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
responseHandler: Response handler to use in place of the default response handling provided by the core service
"""
request_info = self.create_delete_request_information(
request_configuration
)
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_no_response_content_async(request_info, response_handler, error_mapping)
async def get(self,request_configuration: Optional[AttachmentItemRequestBuilderGetRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> Optional[attachment.Attachment]:
"""
The collection of FileAttachment, ItemAttachment, and referenceAttachment attachments for the event. Navigation property. Read-only. Nullable.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
responseHandler: Response handler to use in place of the default response handling provided by the core service
Returns: Optional[attachment.Attachment]
"""
request_info = self.create_get_request_information(
request_configuration
)
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_async(request_info, attachment.Attachment, response_handler, error_mapping)
@dataclass
class AttachmentItemRequestBuilderDeleteRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, str]] = None
# Request options
options: Optional[List[RequestOption]] = None
@dataclass
class AttachmentItemRequestBuilderGetQueryParameters():
"""
The collection of FileAttachment, ItemAttachment, and referenceAttachment attachments for the event. Navigation property. Read-only. Nullable.
"""
# Expand related entities
expand: Optional[List[str]] = None
# Select properties to be returned
select: Optional[List[str]] = None
def get_query_parameter(self,original_name: Optional[str] = None) -> str:
"""
Maps the query parameters names to their encoded names for the URI template parsing.
Args:
originalName: The original query parameter name in the class.
Returns: str
"""
if original_name is None:
raise Exception("original_name cannot be undefined")
if original_name == "expand":
return "%24expand"
if original_name == "select":
return "%24select"
return original_name
@dataclass
class AttachmentItemRequestBuilderGetRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, str]] = None
# Request options
options: Optional[List[RequestOption]] = None
# Request query parameters
query_parameters: Optional[AttachmentItemRequestBuilder.AttachmentItemRequestBuilderGetQueryParameters] = None
|
PypiClean
|
/lisa-plugin-Wifiledlamps-1.2.2.tar.gz/lisa-plugin-Wifiledlamps-1.2.2/lisa/plugins/Wifiledlamps/web/api.py
|
from tastypie import authorization
from django.conf.urls import patterns, url, include
from tastypie import resources
from tastypie.utils import trailing_slash
import json
from lisa.server.web.weblisa.api.mixins import CustomApiKeyAuthentication
from tastypie.authentication import MultiAuthentication, SessionAuthentication
import wifileds
from ..modules.wifiledlamps import Wifiledlamps
class WifiledlampsResource(resources.Resource):
def __init__(self):
super(WifiledlampsResource, self).__init__()
self.Plugin = Wifiledlamps()
class Meta:
resource_name = 'wifiledlamps'
allowed_methods = ()
authorization = authorization.Authorization()
object_class = Wifiledlamps
authentication = MultiAuthentication(CustomApiKeyAuthentication())
extra_actions = [
{
'name': 'switch',
'http_method': 'GET',
'resource_type': 'list',
'fields': {
'on_off': {
'type': 'string',
'required': True,
'description': 'on or off',
'paramType': 'body'
},
'rooms': {
'type': 'list',
'required': True,
'description': "Provide a list of rooms : ['all','bedroom','kitchen'] ...",
'paramType': 'body'
},
'color': {
'type': 'string',
'required': False,
'description': "Color to use",
'paramType': 'body'
},
'intensity': {
'type': 'string',
'required': False,
'description': 'Intensity to use',
'paramType': 'body'
},
'groups': {
'type': 'list',
'required': False,
'description': "Which groups to use (1,2,3,4). By default, will use all.",
'paramType': 'body'
},
'lamptype': {
'type': 'string',
'required': False,
'description': "rgb, white or rgbw. By default, will use rgbw.",
'paramType': 'body'
},
}
}
]
def base_urls(self):
return [
url(r"^plugin/(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash()),
self.wrap_view('dispatch_list'), name="api_dispatch_list"),
url(r"^plugin/(?P<resource_name>%s)/schema%s$" % (self._meta.resource_name, trailing_slash()),
self.wrap_view('get_schema'), name="api_get_schema"),
url(r"^(?P<resource_name>%s)/switch%s" % (self._meta.resource_name, trailing_slash()),
self.wrap_view('switch'), name="api_wifiledlamps_switch"),
]
def switch(self, request, **kwargs):
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
from tastypie.http import HttpAccepted, HttpNotModified
on_off = request.GET.get('on_off', '')
rooms = request.GET.get('rooms', '')
groups = request.GET.get('groups', [1, 2, 3, 4])
color = request.GET.get('color', '')
intensity = request.GET.get('intensity', '')
lamptype = request.GET.get('lamptype', 'rgbw')
for room in self.Plugin.configuration_plugin['configuration']['controller']:
if room['room'] in rooms:
self.led_connection = wifileds.limitlessled.connect(room['address'],
room['port'])
if on_off == 'on':
if groups:
for group in groups:
if lamptype == 'white':
self.led_connection.white.zone_on(group)
elif lamptype == 'rgb':
self.led_connection.rgb.zone_on(group)
else:
self.led_connection.rgbw.zone_on(group)
else:
if lamptype == 'white':
self.led_connection.white.all_on()
elif lamptype == 'rgb':
self.led_connection.rgb.all_on()
else:
self.led_connection.rgbw.all_on()
else:
if groups:
for group in groups:
if lamptype == 'white':
self.led_connection.white.zone_off(group)
elif lamptype == 'rgb':
self.led_connection.rgb.zone_off(group)
else:
self.led_connection.rgbw.zone_off(group)
else:
if lamptype == 'white':
self.led_connection.white.all_off()
elif lamptype == 'rgb':
self.led_connection.rgb.all_off()
else:
self.led_connection.rgbw.all_off()
self.log_throttled_access(request)
return self.create_response(request, {'status': 'success', 'log': "ok"}, HttpAccepted)
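# ---------------------------------------------------------------------------
# Illustrative usage (an assumption, not part of the original plugin): the
# switch action is exposed as a GET endpoint on this resource, so an API-key
# authenticated client could call, for example,
#     GET .../wifiledlamps/switch/?on_off=on&rooms=bedroom&lamptype=rgbw
# to switch on the rgbw bulbs of all groups on the controller mapped to the
# 'bedroom' room in the plugin configuration.
# ---------------------------------------------------------------------------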
|
PypiClean
|