id | text | dataset_id
---|---|---|
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/readable-stream/lib/_stream_readable.js
|
'use strict';
module.exports = Readable;
/*<replacement>*/
var Duplex;
/*</replacement>*/
Readable.ReadableState = ReadableState;
/*<replacement>*/
var EE = require('events').EventEmitter;
var EElistenerCount = function EElistenerCount(emitter, type) {
return emitter.listeners(type).length;
};
/*</replacement>*/
/*<replacement>*/
var Stream = require('./internal/streams/stream');
/*</replacement>*/
var Buffer = require('buffer').Buffer;
var OurUint8Array = (typeof global !== 'undefined' ? global : typeof window !== 'undefined' ? window : typeof self !== 'undefined' ? self : {}).Uint8Array || function () {};
function _uint8ArrayToBuffer(chunk) {
return Buffer.from(chunk);
}
function _isUint8Array(obj) {
return Buffer.isBuffer(obj) || obj instanceof OurUint8Array;
}
/*<replacement>*/
var debugUtil = require('util');
var debug;
if (debugUtil && debugUtil.debuglog) {
debug = debugUtil.debuglog('stream');
} else {
debug = function debug() {};
}
/*</replacement>*/
var BufferList = require('./internal/streams/buffer_list');
var destroyImpl = require('./internal/streams/destroy');
var _require = require('./internal/streams/state'),
getHighWaterMark = _require.getHighWaterMark;
var _require$codes = require('../errors').codes,
ERR_INVALID_ARG_TYPE = _require$codes.ERR_INVALID_ARG_TYPE,
ERR_STREAM_PUSH_AFTER_EOF = _require$codes.ERR_STREAM_PUSH_AFTER_EOF,
ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED,
ERR_STREAM_UNSHIFT_AFTER_END_EVENT = _require$codes.ERR_STREAM_UNSHIFT_AFTER_END_EVENT;
// Lazy loaded to improve the startup performance.
var StringDecoder;
var createReadableStreamAsyncIterator;
var from;
require('inherits')(Readable, Stream);
var errorOrDestroy = destroyImpl.errorOrDestroy;
var kProxyEvents = ['error', 'close', 'destroy', 'pause', 'resume'];
function prependListener(emitter, event, fn) {
// Sadly this is not cacheable as some libraries bundle their own
// event emitter implementation with them.
if (typeof emitter.prependListener === 'function') return emitter.prependListener(event, fn);
// This is a hack to make sure that our error handler is attached before any
// userland ones. NEVER DO THIS. This is here only because this code needs
// to continue to work with older versions of Node.js that do not include
// the prependListener() method. The goal is to eventually remove this hack.
if (!emitter._events || !emitter._events[event]) emitter.on(event, fn);else if (Array.isArray(emitter._events[event])) emitter._events[event].unshift(fn);else emitter._events[event] = [fn, emitter._events[event]];
}
function ReadableState(options, stream, isDuplex) {
Duplex = Duplex || require('./_stream_duplex');
options = options || {};
// Duplex streams are both readable and writable, but share
// the same options object.
// However, some cases require setting options to different
// values for the readable and the writable sides of the duplex stream.
// These options can be provided separately as readableXXX and writableXXX.
if (typeof isDuplex !== 'boolean') isDuplex = stream instanceof Duplex;
// object stream flag. Used to make read(n) ignore n and to
// make all the buffer merging and length checks go away
this.objectMode = !!options.objectMode;
if (isDuplex) this.objectMode = this.objectMode || !!options.readableObjectMode;
// the point at which it stops calling _read() to fill the buffer
// Note: 0 is a valid value, means "don't call _read preemptively ever"
this.highWaterMark = getHighWaterMark(this, options, 'readableHighWaterMark', isDuplex);
// A linked list is used to store data chunks instead of an array because the
// linked list can remove elements from the beginning faster than
// array.shift()
this.buffer = new BufferList();
this.length = 0;
this.pipes = null;
this.pipesCount = 0;
this.flowing = null;
this.ended = false;
this.endEmitted = false;
this.reading = false;
// a flag to be able to tell if the event 'readable'/'data' is emitted
// immediately, or on a later tick. We set this to true at first, because
// any actions that shouldn't happen until "later" should generally also
// not happen before the first read call.
this.sync = true;
// whenever we return null, then we set a flag to say
// that we're awaiting a 'readable' event emission.
this.needReadable = false;
this.emittedReadable = false;
this.readableListening = false;
this.resumeScheduled = false;
this.paused = true;
// Should close be emitted on destroy. Defaults to true.
this.emitClose = options.emitClose !== false;
// Should .destroy() be called after 'end' (and potentially 'finish')
this.autoDestroy = !!options.autoDestroy;
// has it been destroyed
this.destroyed = false;
// Crypto is kind of old and crusty. Historically, its default string
// encoding is 'binary' so we have to make this configurable.
// Everything else in the universe uses 'utf8', though.
this.defaultEncoding = options.defaultEncoding || 'utf8';
// the number of writers that are awaiting a drain event in .pipe()s
this.awaitDrain = 0;
// if true, a maybeReadMore has been scheduled
this.readingMore = false;
this.decoder = null;
this.encoding = null;
if (options.encoding) {
if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder;
this.decoder = new StringDecoder(options.encoding);
this.encoding = options.encoding;
}
}
function Readable(options) {
Duplex = Duplex || require('./_stream_duplex');
if (!(this instanceof Readable)) return new Readable(options);
// Checking for a Stream.Duplex instance is faster here instead of inside
// the ReadableState constructor, at least with V8 6.5
var isDuplex = this instanceof Duplex;
this._readableState = new ReadableState(options, this, isDuplex);
// legacy
this.readable = true;
if (options) {
if (typeof options.read === 'function') this._read = options.read;
if (typeof options.destroy === 'function') this._destroy = options.destroy;
}
Stream.call(this);
}
Object.defineProperty(Readable.prototype, 'destroyed', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
if (this._readableState === undefined) {
return false;
}
return this._readableState.destroyed;
},
set: function set(value) {
// we ignore the value if the stream
// has not been initialized yet
if (!this._readableState) {
return;
}
// backward compatibility, the user is explicitly
// managing destroyed
this._readableState.destroyed = value;
}
});
Readable.prototype.destroy = destroyImpl.destroy;
Readable.prototype._undestroy = destroyImpl.undestroy;
Readable.prototype._destroy = function (err, cb) {
cb(err);
};
// Manually shove something into the read() buffer.
// This returns true if the highWaterMark has not been hit yet,
// similar to how Writable.write() returns true if you should
// write() some more.
Readable.prototype.push = function (chunk, encoding) {
var state = this._readableState;
var skipChunkCheck;
if (!state.objectMode) {
if (typeof chunk === 'string') {
encoding = encoding || state.defaultEncoding;
if (encoding !== state.encoding) {
chunk = Buffer.from(chunk, encoding);
encoding = '';
}
skipChunkCheck = true;
}
} else {
skipChunkCheck = true;
}
return readableAddChunk(this, chunk, encoding, false, skipChunkCheck);
};
// Unshift should *always* be something directly out of read()
Readable.prototype.unshift = function (chunk) {
return readableAddChunk(this, chunk, null, true, false);
};
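// Illustrative sketch (not part of the original source): driving push()/read()
// by hand on a stream with a no-op _read. The variable name `r` and the chunks
// used here are hypothetical.
//
//   var r = new Readable({ read: function () {} });
//   r.push('hello ');                  // true while below the highWaterMark
//   r.push('world');
//   r.push(null);                      // signal EOF
//   console.log(r.read().toString());  // 'hello world'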
function readableAddChunk(stream, chunk, encoding, addToFront, skipChunkCheck) {
debug('readableAddChunk', chunk);
var state = stream._readableState;
if (chunk === null) {
state.reading = false;
onEofChunk(stream, state);
} else {
var er;
if (!skipChunkCheck) er = chunkInvalid(state, chunk);
if (er) {
errorOrDestroy(stream, er);
} else if (state.objectMode || chunk && chunk.length > 0) {
if (typeof chunk !== 'string' && !state.objectMode && Object.getPrototypeOf(chunk) !== Buffer.prototype) {
chunk = _uint8ArrayToBuffer(chunk);
}
if (addToFront) {
if (state.endEmitted) errorOrDestroy(stream, new ERR_STREAM_UNSHIFT_AFTER_END_EVENT());else addChunk(stream, state, chunk, true);
} else if (state.ended) {
errorOrDestroy(stream, new ERR_STREAM_PUSH_AFTER_EOF());
} else if (state.destroyed) {
return false;
} else {
state.reading = false;
if (state.decoder && !encoding) {
chunk = state.decoder.write(chunk);
if (state.objectMode || chunk.length !== 0) addChunk(stream, state, chunk, false);else maybeReadMore(stream, state);
} else {
addChunk(stream, state, chunk, false);
}
}
} else if (!addToFront) {
state.reading = false;
maybeReadMore(stream, state);
}
}
// We can push more data if we are below the highWaterMark.
// Also, if we have no data yet, we can stand some more bytes.
// This is to work around cases where hwm=0, such as the repl.
return !state.ended && (state.length < state.highWaterMark || state.length === 0);
}
function addChunk(stream, state, chunk, addToFront) {
if (state.flowing && state.length === 0 && !state.sync) {
state.awaitDrain = 0;
stream.emit('data', chunk);
} else {
// update the buffer info.
state.length += state.objectMode ? 1 : chunk.length;
if (addToFront) state.buffer.unshift(chunk);else state.buffer.push(chunk);
if (state.needReadable) emitReadable(stream);
}
maybeReadMore(stream, state);
}
function chunkInvalid(state, chunk) {
var er;
if (!_isUint8Array(chunk) && typeof chunk !== 'string' && chunk !== undefined && !state.objectMode) {
er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer', 'Uint8Array'], chunk);
}
return er;
}
Readable.prototype.isPaused = function () {
return this._readableState.flowing === false;
};
// backwards compatibility.
Readable.prototype.setEncoding = function (enc) {
if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder;
var decoder = new StringDecoder(enc);
this._readableState.decoder = decoder;
// If setEncoding(null), decoder.encoding equals utf8
this._readableState.encoding = this._readableState.decoder.encoding;
// Iterate over current buffer to convert already stored Buffers:
var p = this._readableState.buffer.head;
var content = '';
while (p !== null) {
content += decoder.write(p.data);
p = p.next;
}
this._readableState.buffer.clear();
if (content !== '') this._readableState.buffer.push(content);
this._readableState.length = content.length;
return this;
};
// Don't raise the hwm > 1GB
var MAX_HWM = 0x40000000;
function computeNewHighWaterMark(n) {
if (n >= MAX_HWM) {
// TODO(ronag): Throw ERR_VALUE_OUT_OF_RANGE.
n = MAX_HWM;
} else {
// Get the next highest power of 2 to prevent increasing hwm excessively in
// tiny amounts
n--;
n |= n >>> 1;
n |= n >>> 2;
n |= n >>> 4;
n |= n >>> 8;
n |= n >>> 16;
n++;
}
return n;
}
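// Illustrative note (not part of the original source): the bit-twiddling above
// rounds n up to the next power of two, capped at MAX_HWM (1GB). For example:
//   computeNewHighWaterMark(1000)        // -> 1024
//   computeNewHighWaterMark(16384)       // -> 16384 (already a power of two)
//   computeNewHighWaterMark(MAX_HWM + 1) // -> MAX_HWM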
// This function is designed to be inlinable, so please take care when making
// changes to the function body.
function howMuchToRead(n, state) {
if (n <= 0 || state.length === 0 && state.ended) return 0;
if (state.objectMode) return 1;
if (n !== n) {
// Only flow one buffer at a time
if (state.flowing && state.length) return state.buffer.head.data.length;else return state.length;
}
// If we're asking for more than the current hwm, then raise the hwm.
if (n > state.highWaterMark) state.highWaterMark = computeNewHighWaterMark(n);
if (n <= state.length) return n;
// Don't have enough
if (!state.ended) {
state.needReadable = true;
return 0;
}
return state.length;
}
// you can override either this method, or the async _read(n) below.
Readable.prototype.read = function (n) {
debug('read', n);
n = parseInt(n, 10);
var state = this._readableState;
var nOrig = n;
if (n !== 0) state.emittedReadable = false;
// if we're doing read(0) to trigger a readable event, but we
// already have a bunch of data in the buffer, then just trigger
// the 'readable' event and move on.
if (n === 0 && state.needReadable && ((state.highWaterMark !== 0 ? state.length >= state.highWaterMark : state.length > 0) || state.ended)) {
debug('read: emitReadable', state.length, state.ended);
if (state.length === 0 && state.ended) endReadable(this);else emitReadable(this);
return null;
}
n = howMuchToRead(n, state);
// if we've ended, and we're now clear, then finish it up.
if (n === 0 && state.ended) {
if (state.length === 0) endReadable(this);
return null;
}
// All the actual chunk generation logic needs to be
// *below* the call to _read. The reason is that in certain
// synthetic stream cases, such as passthrough streams, _read
// may be a completely synchronous operation which may change
// the state of the read buffer, providing enough data when
// before there was *not* enough.
//
// So, the steps are:
// 1. Figure out what the state of things will be after we do
// a read from the buffer.
//
// 2. If that resulting state will trigger a _read, then call _read.
// Note that this may be asynchronous, or synchronous. Yes, it is
// deeply ugly to write APIs this way, but that still doesn't mean
// that the Readable class should behave improperly, as streams are
// designed to be sync/async agnostic.
// Take note if the _read call is sync or async (ie, if the read call
// has returned yet), so that we know whether or not it's safe to emit
// 'readable' etc.
//
// 3. Actually pull the requested chunks out of the buffer and return.
// if we need a readable event, then we need to do some reading.
var doRead = state.needReadable;
debug('need readable', doRead);
// if we currently have less than the highWaterMark, then also read some
if (state.length === 0 || state.length - n < state.highWaterMark) {
doRead = true;
debug('length less than watermark', doRead);
}
// however, if we've ended, then there's no point, and if we're already
// reading, then it's unnecessary.
if (state.ended || state.reading) {
doRead = false;
debug('reading or ended', doRead);
} else if (doRead) {
debug('do read');
state.reading = true;
state.sync = true;
// if the length is currently zero, then we *need* a readable event.
if (state.length === 0) state.needReadable = true;
// call internal read method
this._read(state.highWaterMark);
state.sync = false;
// If _read pushed data synchronously, then `reading` will be false,
// and we need to re-evaluate how much data we can return to the user.
if (!state.reading) n = howMuchToRead(nOrig, state);
}
var ret;
if (n > 0) ret = fromList(n, state);else ret = null;
if (ret === null) {
state.needReadable = state.length <= state.highWaterMark;
n = 0;
} else {
state.length -= n;
state.awaitDrain = 0;
}
if (state.length === 0) {
// If we have nothing in the buffer, then we want to know
// as soon as we *do* get something into the buffer.
if (!state.ended) state.needReadable = true;
// If we tried to read() past the EOF, then emit end on the next tick.
if (nOrig !== n && state.ended) endReadable(this);
}
if (ret !== null) this.emit('data', ret);
return ret;
};
function onEofChunk(stream, state) {
debug('onEofChunk');
if (state.ended) return;
if (state.decoder) {
var chunk = state.decoder.end();
if (chunk && chunk.length) {
state.buffer.push(chunk);
state.length += state.objectMode ? 1 : chunk.length;
}
}
state.ended = true;
if (state.sync) {
// if we are sync, wait until next tick to emit the data.
// Otherwise we risk emitting data in the flow()
// the readable code triggers during a read() call
emitReadable(stream);
} else {
// emit 'readable' now to make sure it gets picked up.
state.needReadable = false;
if (!state.emittedReadable) {
state.emittedReadable = true;
emitReadable_(stream);
}
}
}
// Don't emit readable right away in sync mode, because this can trigger
// another read() call => stack overflow. This way, it might trigger
// a nextTick recursion warning, but that's not so bad.
function emitReadable(stream) {
var state = stream._readableState;
debug('emitReadable', state.needReadable, state.emittedReadable);
state.needReadable = false;
if (!state.emittedReadable) {
debug('emitReadable', state.flowing);
state.emittedReadable = true;
process.nextTick(emitReadable_, stream);
}
}
function emitReadable_(stream) {
var state = stream._readableState;
debug('emitReadable_', state.destroyed, state.length, state.ended);
if (!state.destroyed && (state.length || state.ended)) {
stream.emit('readable');
state.emittedReadable = false;
}
// The stream needs another readable event if
// 1. It is not flowing, as the flow mechanism will take
// care of it.
// 2. It is not ended.
// 3. It is below the highWaterMark, so we can schedule
// another readable later.
state.needReadable = !state.flowing && !state.ended && state.length <= state.highWaterMark;
flow(stream);
}
// at this point, the user has presumably seen the 'readable' event,
// and called read() to consume some data. that may have triggered
// in turn another _read(n) call, in which case reading = true if
// it's in progress.
// However, if we're not ended, or reading, and the length < hwm,
// then go ahead and try to read some more preemptively.
function maybeReadMore(stream, state) {
if (!state.readingMore) {
state.readingMore = true;
process.nextTick(maybeReadMore_, stream, state);
}
}
function maybeReadMore_(stream, state) {
// Attempt to read more data if we should.
//
// The conditions for reading more data are (one of):
// - Not enough data buffered (state.length < state.highWaterMark). The loop
// is responsible for filling the buffer with enough data if such data
// is available. If highWaterMark is 0 and we are not in the flowing mode
// we should _not_ attempt to buffer any extra data. We'll get more data
// when the stream consumer calls read() instead.
// - No data in the buffer, and the stream is in flowing mode. In this mode
// the loop below is responsible for ensuring read() is called. Failing to
// call read here would abort the flow and there's no other mechanism for
// continuing the flow if the stream consumer has just subscribed to the
// 'data' event.
//
// In addition to the above conditions to keep reading data, the following
// conditions prevent the data from being read:
// - The stream has ended (state.ended).
// - There is already a pending 'read' operation (state.reading). This is a
// case where the stream has called the implementation-defined _read()
// method, but they are processing the call asynchronously and have _not_
// called push() with new data. In this case we skip performing more
// read()s. The execution ends in this method again after the _read() ends
// up calling push() with more data.
while (!state.reading && !state.ended && (state.length < state.highWaterMark || state.flowing && state.length === 0)) {
var len = state.length;
debug('maybeReadMore read 0');
stream.read(0);
if (len === state.length)
// didn't get any data, stop spinning.
break;
}
state.readingMore = false;
}
// abstract method. to be overridden in specific implementation classes.
// call cb(er, data) where data is <= n in length.
// for virtual (non-string, non-buffer) streams, "length" is somewhat
// arbitrary, and perhaps not very meaningful.
Readable.prototype._read = function (n) {
errorOrDestroy(this, new ERR_METHOD_NOT_IMPLEMENTED('_read()'));
};
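// Illustrative sketch (not part of the original source): a minimal subclass
// that overrides _read(). The `Counter` name and its behaviour are hypothetical.
//
//   var util = require('util');
//   function Counter(opts) {
//     Readable.call(this, opts);
//     this._n = 0;
//   }
//   util.inherits(Counter, Readable);
//   Counter.prototype._read = function () {
//     this._n += 1;
//     this.push(this._n > 3 ? null : String(this._n));
//   };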
Readable.prototype.pipe = function (dest, pipeOpts) {
var src = this;
var state = this._readableState;
switch (state.pipesCount) {
case 0:
state.pipes = dest;
break;
case 1:
state.pipes = [state.pipes, dest];
break;
default:
state.pipes.push(dest);
break;
}
state.pipesCount += 1;
debug('pipe count=%d opts=%j', state.pipesCount, pipeOpts);
var doEnd = (!pipeOpts || pipeOpts.end !== false) && dest !== process.stdout && dest !== process.stderr;
var endFn = doEnd ? onend : unpipe;
if (state.endEmitted) process.nextTick(endFn);else src.once('end', endFn);
dest.on('unpipe', onunpipe);
function onunpipe(readable, unpipeInfo) {
debug('onunpipe');
if (readable === src) {
if (unpipeInfo && unpipeInfo.hasUnpiped === false) {
unpipeInfo.hasUnpiped = true;
cleanup();
}
}
}
function onend() {
debug('onend');
dest.end();
}
// when the dest drains, it reduces the awaitDrain counter
// on the source. This would be more elegant with a .once()
// handler in flow(), but adding and removing repeatedly is
// too slow.
var ondrain = pipeOnDrain(src);
dest.on('drain', ondrain);
var cleanedUp = false;
function cleanup() {
debug('cleanup');
// cleanup event handlers once the pipe is broken
dest.removeListener('close', onclose);
dest.removeListener('finish', onfinish);
dest.removeListener('drain', ondrain);
dest.removeListener('error', onerror);
dest.removeListener('unpipe', onunpipe);
src.removeListener('end', onend);
src.removeListener('end', unpipe);
src.removeListener('data', ondata);
cleanedUp = true;
// if the reader is waiting for a drain event from this
// specific writer, then it would cause it to never start
// flowing again.
// So, if this is awaiting a drain, then we just call it now.
// If we don't know, then assume that we are waiting for one.
if (state.awaitDrain && (!dest._writableState || dest._writableState.needDrain)) ondrain();
}
src.on('data', ondata);
function ondata(chunk) {
debug('ondata');
var ret = dest.write(chunk);
debug('dest.write', ret);
if (ret === false) {
// If the user unpiped during `dest.write()`, it is possible
// to get stuck in a permanently paused state if that write
// also returned false.
// => Check whether `dest` is still a piping destination.
if ((state.pipesCount === 1 && state.pipes === dest || state.pipesCount > 1 && indexOf(state.pipes, dest) !== -1) && !cleanedUp) {
debug('false write response, pause', state.awaitDrain);
state.awaitDrain++;
}
src.pause();
}
}
// if the dest has an error, then stop piping into it.
// however, don't suppress the throwing behavior for this.
function onerror(er) {
debug('onerror', er);
unpipe();
dest.removeListener('error', onerror);
if (EElistenerCount(dest, 'error') === 0) errorOrDestroy(dest, er);
}
// Make sure our error handler is attached before userland ones.
prependListener(dest, 'error', onerror);
// Both close and finish should trigger unpipe, but only once.
function onclose() {
dest.removeListener('finish', onfinish);
unpipe();
}
dest.once('close', onclose);
function onfinish() {
debug('onfinish');
dest.removeListener('close', onclose);
unpipe();
}
dest.once('finish', onfinish);
function unpipe() {
debug('unpipe');
src.unpipe(dest);
}
// tell the dest that it's being piped to
dest.emit('pipe', src);
// start the flow if it hasn't been started already.
if (!state.flowing) {
debug('pipe resume');
src.resume();
}
return dest;
};
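// Illustrative sketch (not part of the original source): typical pipe() usage.
// `src` and `dest` are hypothetical readable/writable streams; backpressure is
// handled by the awaitDrain/ondrain machinery above.
//
//   src.pipe(dest);                  // flow src into dest, end dest when src ends
//   src.pipe(dest, { end: false });  // keep dest open after src ends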
function pipeOnDrain(src) {
return function pipeOnDrainFunctionResult() {
var state = src._readableState;
debug('pipeOnDrain', state.awaitDrain);
if (state.awaitDrain) state.awaitDrain--;
if (state.awaitDrain === 0 && EElistenerCount(src, 'data')) {
state.flowing = true;
flow(src);
}
};
}
Readable.prototype.unpipe = function (dest) {
var state = this._readableState;
var unpipeInfo = {
hasUnpiped: false
};
// if we're not piping anywhere, then do nothing.
if (state.pipesCount === 0) return this;
// just one destination. most common case.
if (state.pipesCount === 1) {
// passed in one, but it's not the right one.
if (dest && dest !== state.pipes) return this;
if (!dest) dest = state.pipes;
// got a match.
state.pipes = null;
state.pipesCount = 0;
state.flowing = false;
if (dest) dest.emit('unpipe', this, unpipeInfo);
return this;
}
// slow case. multiple pipe destinations.
if (!dest) {
// remove all.
var dests = state.pipes;
var len = state.pipesCount;
state.pipes = null;
state.pipesCount = 0;
state.flowing = false;
for (var i = 0; i < len; i++) dests[i].emit('unpipe', this, {
hasUnpiped: false
});
return this;
}
// try to find the right one.
var index = indexOf(state.pipes, dest);
if (index === -1) return this;
state.pipes.splice(index, 1);
state.pipesCount -= 1;
if (state.pipesCount === 1) state.pipes = state.pipes[0];
dest.emit('unpipe', this, unpipeInfo);
return this;
};
// set up data events if they are asked for
// Ensure readable listeners eventually get something
Readable.prototype.on = function (ev, fn) {
var res = Stream.prototype.on.call(this, ev, fn);
var state = this._readableState;
if (ev === 'data') {
// update readableListening so that resume() may be a no-op
// a few lines down. This is needed to support once('readable').
state.readableListening = this.listenerCount('readable') > 0;
// Try start flowing on next tick if stream isn't explicitly paused
if (state.flowing !== false) this.resume();
} else if (ev === 'readable') {
if (!state.endEmitted && !state.readableListening) {
state.readableListening = state.needReadable = true;
state.flowing = false;
state.emittedReadable = false;
debug('on readable', state.length, state.reading);
if (state.length) {
emitReadable(this);
} else if (!state.reading) {
process.nextTick(nReadingNextTick, this);
}
}
}
return res;
};
Readable.prototype.addListener = Readable.prototype.on;
Readable.prototype.removeListener = function (ev, fn) {
var res = Stream.prototype.removeListener.call(this, ev, fn);
if (ev === 'readable') {
// We need to check if there is someone still listening to
// readable and reset the state. However this needs to happen
// after readable has been emitted but before I/O (nextTick) to
// support once('readable', fn) cycles. This means that calling
// resume within the same tick will have no
// effect.
process.nextTick(updateReadableListening, this);
}
return res;
};
Readable.prototype.removeAllListeners = function (ev) {
var res = Stream.prototype.removeAllListeners.apply(this, arguments);
if (ev === 'readable' || ev === undefined) {
// We need to check if there is someone still listening to
// readable and reset the state. However this needs to happen
// after readable has been emitted but before I/O (nextTick) to
// support once('readable', fn) cycles. This means that calling
// resume within the same tick will have no
// effect.
process.nextTick(updateReadableListening, this);
}
return res;
};
function updateReadableListening(self) {
var state = self._readableState;
state.readableListening = self.listenerCount('readable') > 0;
if (state.resumeScheduled && !state.paused) {
// flowing needs to be set to true now, otherwise
// the upcoming resume will not flow.
state.flowing = true;
// crude way to check if we should resume
} else if (self.listenerCount('data') > 0) {
self.resume();
}
}
function nReadingNextTick(self) {
debug('readable nexttick read 0');
self.read(0);
}
// pause() and resume() are remnants of the legacy readable stream API
// If the user uses them, then switch into old mode.
Readable.prototype.resume = function () {
var state = this._readableState;
if (!state.flowing) {
debug('resume');
// we flow only if there is no one listening
// for readable, but we still have to call
// resume()
state.flowing = !state.readableListening;
resume(this, state);
}
state.paused = false;
return this;
};
function resume(stream, state) {
if (!state.resumeScheduled) {
state.resumeScheduled = true;
process.nextTick(resume_, stream, state);
}
}
function resume_(stream, state) {
debug('resume', state.reading);
if (!state.reading) {
stream.read(0);
}
state.resumeScheduled = false;
stream.emit('resume');
flow(stream);
if (state.flowing && !state.reading) stream.read(0);
}
Readable.prototype.pause = function () {
debug('call pause flowing=%j', this._readableState.flowing);
if (this._readableState.flowing !== false) {
debug('pause');
this._readableState.flowing = false;
this.emit('pause');
}
this._readableState.paused = true;
return this;
};
function flow(stream) {
var state = stream._readableState;
debug('flow', state.flowing);
while (state.flowing && stream.read() !== null);
}
// wrap an old-style stream as the async data source.
// This is *not* part of the readable stream interface.
// It is an ugly unfortunate mess of history.
Readable.prototype.wrap = function (stream) {
var _this = this;
var state = this._readableState;
var paused = false;
stream.on('end', function () {
debug('wrapped end');
if (state.decoder && !state.ended) {
var chunk = state.decoder.end();
if (chunk && chunk.length) _this.push(chunk);
}
_this.push(null);
});
stream.on('data', function (chunk) {
debug('wrapped data');
if (state.decoder) chunk = state.decoder.write(chunk);
// don't skip over falsy values in objectMode
if (state.objectMode && (chunk === null || chunk === undefined)) return;else if (!state.objectMode && (!chunk || !chunk.length)) return;
var ret = _this.push(chunk);
if (!ret) {
paused = true;
stream.pause();
}
});
// proxy all the other methods.
// important when wrapping filters and duplexes.
for (var i in stream) {
if (this[i] === undefined && typeof stream[i] === 'function') {
this[i] = function methodWrap(method) {
return function methodWrapReturnFunction() {
return stream[method].apply(stream, arguments);
};
}(i);
}
}
// proxy certain important events.
for (var n = 0; n < kProxyEvents.length; n++) {
stream.on(kProxyEvents[n], this.emit.bind(this, kProxyEvents[n]));
}
// when we try to consume some more bytes, simply unpause the
// underlying stream.
this._read = function (n) {
debug('wrapped _read', n);
if (paused) {
paused = false;
stream.resume();
}
};
return this;
};
if (typeof Symbol === 'function') {
Readable.prototype[Symbol.asyncIterator] = function () {
if (createReadableStreamAsyncIterator === undefined) {
createReadableStreamAsyncIterator = require('./internal/streams/async_iterator');
}
return createReadableStreamAsyncIterator(this);
};
}
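// Illustrative sketch (not part of the original source): consuming a readable
// with for await...of via the async iterator defined above. The `collect`
// helper is hypothetical.
//
//   async function collect(readable) {
//     var chunks = [];
//     for await (var chunk of readable) chunks.push(chunk);
//     return chunks;
//   }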
Object.defineProperty(Readable.prototype, 'readableHighWaterMark', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._readableState.highWaterMark;
}
});
Object.defineProperty(Readable.prototype, 'readableBuffer', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._readableState && this._readableState.buffer;
}
});
Object.defineProperty(Readable.prototype, 'readableFlowing', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._readableState.flowing;
},
set: function set(state) {
if (this._readableState) {
this._readableState.flowing = state;
}
}
});
// exposed for testing purposes only.
Readable._fromList = fromList;
Object.defineProperty(Readable.prototype, 'readableLength', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._readableState.length;
}
});
// Pluck off n bytes from an array of buffers.
// Length is the combined lengths of all the buffers in the list.
// This function is designed to be inlinable, so please take care when making
// changes to the function body.
function fromList(n, state) {
// nothing buffered
if (state.length === 0) return null;
var ret;
if (state.objectMode) ret = state.buffer.shift();else if (!n || n >= state.length) {
// read it all, truncate the list
if (state.decoder) ret = state.buffer.join('');else if (state.buffer.length === 1) ret = state.buffer.first();else ret = state.buffer.concat(state.length);
state.buffer.clear();
} else {
// read part of list
ret = state.buffer.consume(n, state.decoder);
}
return ret;
}
function endReadable(stream) {
var state = stream._readableState;
debug('endReadable', state.endEmitted);
if (!state.endEmitted) {
state.ended = true;
process.nextTick(endReadableNT, state, stream);
}
}
function endReadableNT(state, stream) {
debug('endReadableNT', state.endEmitted, state.length);
// Check that we didn't get one last unshift.
if (!state.endEmitted && state.length === 0) {
state.endEmitted = true;
stream.readable = false;
stream.emit('end');
if (state.autoDestroy) {
// In case of duplex streams we need a way to detect
// if the writable side is ready for autoDestroy as well
var wState = stream._writableState;
if (!wState || wState.autoDestroy && wState.finished) {
stream.destroy();
}
}
}
}
if (typeof Symbol === 'function') {
Readable.from = function (iterable, opts) {
if (from === undefined) {
from = require('./internal/streams/from');
}
return from(Readable, iterable, opts);
};
}
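// Illustrative sketch (not part of the original source): building a stream from
// an iterable with Readable.from(). The array used here is hypothetical; chunks
// are emitted in object mode by default.
//
//   var letters = Readable.from(['a', 'b', 'c']);
//   letters.on('data', function (chunk) { console.log(chunk); }); // 'a', 'b', 'c'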
function indexOf(xs, x) {
for (var i = 0, l = xs.length; i < l; i++) {
if (xs[i] === x) return i;
}
return -1;
}
|
PypiClean
|
/apache-superset_qwerty-2.0.11.tar.gz/apache-superset_qwerty-2.0.11/INSTALL.md
|
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
# INSTALL / BUILD instructions for Apache Superset
At this time, the Dockerfile at RELEASING/Dockerfile.from_local_tarball
constitutes the recipe for building a working release from a source
release tarball.
|
PypiClean
|
/gnosis-yodaplus-py-3.1.19.tar.gz/gnosis-yodaplus-py-3.1.19/gnosis/eth/django/serializers.py
|
import logging
from django.utils.translation import gettext_lazy as _
from ethereum.utils import checksum_encode
from hexbytes import HexBytes
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from ..constants import (SIGNATURE_R_MAX_VALUE, SIGNATURE_R_MIN_VALUE,
SIGNATURE_S_MAX_VALUE, SIGNATURE_S_MIN_VALUE,
SIGNATURE_V_MAX_VALUE, SIGNATURE_V_MIN_VALUE)
logger = logging.getLogger(__name__)
# ================================================ #
# Custom Fields
# ================================================ #
class EthereumAddressField(serializers.Field):
"""
Checksummed Ethereum address
https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md
"""
def __init__(self, allow_zero_address: bool = False, allow_sentinel_address: bool = False, **kwargs):
self.allow_zero_address = allow_zero_address
self.allow_sentinel_address = allow_sentinel_address
super().__init__(**kwargs)
def to_representation(self, obj):
return obj
def to_internal_value(self, data):
# Check if address is valid
try:
if checksum_encode(data) != data:
raise ValueError
elif int(data, 16) == 0 and not self.allow_zero_address:
raise ValidationError("0x0 address is not allowed")
elif int(data, 16) == 1 and not self.allow_sentinel_address:
raise ValidationError("0x1 address is not allowed")
except ValueError:
raise ValidationError("Address %s is not checksumed" % data)
except Exception:
raise ValidationError("Address %s is not valid" % data)
return data
class HexadecimalField(serializers.Field):
"""
Serializes hexadecimal values starting with `0x`. Empty values should be None or just `0x`.
"""
default_error_messages = {
'invalid': _('{value} is not a hexadecimal value.'),
'blank': _('This field may not be blank.'),
'max_length': _('Ensure this field has no more than {max_length} hexadecimal chars (not counting 0x).'),
'min_length': _('Ensure this field has at least {min_length} hexadecimal chars (not counting 0x).'),
}
def __init__(self, **kwargs):
self.allow_blank = kwargs.pop('allow_blank', False)
self.max_length = kwargs.pop('max_length', None)
self.min_length = kwargs.pop('min_length', None)
super().__init__(**kwargs)
def to_representation(self, obj):
if not obj:
return '0x'
# We can get other types like `memoryview` from Django models. `to_internal_value` is not used
# when you provide an object instead of JSON via `data`. Make sure everything is HexBytes.
if hasattr(obj, 'hex'):
obj = HexBytes(obj.hex())
elif not isinstance(obj, HexBytes):
obj = HexBytes(obj)
return obj.hex()
def to_internal_value(self, data):
if isinstance(data, (bytes, memoryview)):
data = data.hex()
elif isinstance(data, str):
data = data.strip() # Trim spaces
if data.startswith('0x'): # Remove 0x prefix
data = data[2:]
elif data is None:
pass
else:
self.fail('invalid', value=data)
if not data:
if self.allow_blank:
return None
else:
self.fail('blank')
try:
data_hex = HexBytes(data)
data_len = len(data_hex)
if self.min_length and data_len < self.min_length:
self.fail('min_length', min_length=self.min_length)
elif self.max_length and data_len > self.max_length:
self.fail('max_length', max_length=self.max_length)
return data_hex
except ValueError:
self.fail('invalid', value=data)
class Sha3HashField(HexadecimalField):
def __init__(self, **kwargs):
kwargs['max_length'] = 32
kwargs['min_length'] = 32
super().__init__(**kwargs)
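# Illustrative sketch (not part of the original module): how these fields might
# be combined in a serializer. `MessageSerializer`, its field names and the
# `checksummed_address` variable are hypothetical.
#
#   class MessageSerializer(serializers.Serializer):
#       sender = EthereumAddressField()
#       payload = HexadecimalField(allow_blank=True)
#       message_hash = Sha3HashField()
#
#   serializer = MessageSerializer(data={'sender': checksummed_address,
#                                        'payload': '0x1234',
#                                        'message_hash': '0x' + '00' * 32})
#   serializer.is_valid()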
# ================================================ #
# Base Serializers
# ================================================ #
class SignatureSerializer(serializers.Serializer):
v = serializers.IntegerField(min_value=SIGNATURE_V_MIN_VALUE,
max_value=SIGNATURE_V_MAX_VALUE)
r = serializers.IntegerField(min_value=SIGNATURE_R_MIN_VALUE,
max_value=SIGNATURE_R_MAX_VALUE)
s = serializers.IntegerField(min_value=SIGNATURE_S_MIN_VALUE,
max_value=SIGNATURE_S_MAX_VALUE)
class TransactionSerializer(serializers.Serializer):
from_ = EthereumAddressField()
value = serializers.IntegerField(min_value=0)
data = HexadecimalField()
gas = serializers.IntegerField(min_value=0)
gas_price = serializers.IntegerField(min_value=0)
nonce = serializers.IntegerField(min_value=0)
def get_fields(self):
result = super().get_fields()
# Rename `from_` to `from`
from_ = result.pop('from_')
result['from'] = from_
return result
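# Illustrative note (not part of the original module): because of the rename in
# get_fields() above, clients submit the reserved word `from` directly, e.g.
# {'from': '0x...', 'value': 0, 'data': '0x', 'gas': 21000, 'gas_price': 1,
# 'nonce': 0}, while the Python-side attribute remains `from_`.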
class TransactionResponseSerializer(serializers.Serializer):
"""
Use chars to avoid problems with big ints (e.g. in JavaScript)
"""
from_ = EthereumAddressField()
value = serializers.IntegerField(min_value=0)
data = serializers.CharField()
gas = serializers.CharField()
gas_price = serializers.CharField()
nonce = serializers.IntegerField(min_value=0)
def get_fields(self):
result = super().get_fields()
# Rename `from_` to `from`
from_ = result.pop('from_')
result['from'] = from_
return result
|
PypiClean
|
/boolrule-0.3.5.tar.gz/boolrule-0.3.5/docs/index.rst
|
.. boolrule documentation master file, created by
sphinx-quickstart on Tue Jul 9 22:26:36 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to boolrule's documentation
===================================
boolrule is a simple boolean expression evaluation engine.
boolrule was built by the team at tails.com_ to evaluate conditional edges
between nodes in a graph-like structure, though we've found numerous uses for
it since.
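
A minimal usage sketch (assuming the ``BoolRule`` class and its ``test``
method exported by the package; see the usage and API pages for details)::

    from boolrule import BoolRule

    rule = BoolRule('x > 5')
    rule.test({'x': 10})  # True
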
Contents:
.. toctree::
:maxdepth: 1
readme
installation
usage
api
expressions
contributing
authors
history
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
.. _tails.com: https://tails.com
|
PypiClean
|
/nexuscloud-client-1.0.9.tar.gz/nexuscloud-client-1.0.9/nexuscloud_client/api/reports_api.py
|
import re # noqa: F401
import sys # noqa: F401
from nexuscloud_client.api_client import ApiClient, Endpoint as _Endpoint
from nexuscloud_client.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from nexuscloud_client.model.nexus_insights_api_v1_advisories_details_get401_response import NexusInsightsApiV1AdvisoriesDetailsGet401Response
from nexuscloud_client.model.nexus_insights_api_v1_reports_cloud_details_get200_response import NexusInsightsApiV1ReportsCloudDetailsGet200Response
from nexuscloud_client.model.nexus_insights_api_v1_reports_cloud_inventory_get200_response import NexusInsightsApiV1ReportsCloudInventoryGet200Response
from nexuscloud_client.model.nexus_insights_api_v1_reports_cloud_summary_get200_response import NexusInsightsApiV1ReportsCloudSummaryGet200Response
from nexuscloud_client.model.nexus_insights_api_v1_reports_details_get200_response import NexusInsightsApiV1ReportsDetailsGet200Response
from nexuscloud_client.model.nexus_insights_api_v1_reports_inventory_get200_response import NexusInsightsApiV1ReportsInventoryGet200Response
from nexuscloud_client.model.nexus_insights_api_v1_reports_summary_get200_response import NexusInsightsApiV1ReportsSummaryGet200Response
class ReportsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.nexus_insights_api_v1_reports_cloud_details_get_endpoint = _Endpoint(
settings={
'response_type': (NexusInsightsApiV1ReportsCloudDetailsGet200Response,),
'auth': [],
'endpoint_path': '/nexus/insights/api/v1/reports/cloudDetails',
'operation_id': 'nexus_insights_api_v1_reports_cloud_details_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'outlook',
'site_name',
'site_name_list',
'site_group_name',
'filter',
'include',
'offset',
'count',
'sort',
],
'required': [
'outlook',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'outlook':
(int,),
'site_name':
(str,),
'site_name_list':
(str,),
'site_group_name':
(str,),
'filter':
(str,),
'include':
(str,),
'offset':
(str,),
'count':
(str,),
'sort':
(str,),
},
'attribute_map': {
'outlook': 'outlook',
'site_name': 'siteName',
'site_name_list': 'siteNameList',
'site_group_name': 'siteGroupName',
'filter': 'filter',
'include': 'include',
'offset': 'offset',
'count': 'count',
'sort': 'sort',
},
'location_map': {
'outlook': 'query',
'site_name': 'query',
'site_name_list': 'query',
'site_group_name': 'query',
'filter': 'query',
'include': 'query',
'offset': 'query',
'count': 'query',
'sort': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.nexus_insights_api_v1_reports_cloud_inventory_get_endpoint = _Endpoint(
settings={
'response_type': (NexusInsightsApiV1ReportsCloudInventoryGet200Response,),
'auth': [],
'endpoint_path': '/nexus/insights/api/v1/reports/cloudInventory',
'operation_id': 'nexus_insights_api_v1_reports_cloud_inventory_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'site_name',
'site_name_list',
'site_group_name',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'site_name':
(str,),
'site_name_list':
(str,),
'site_group_name':
(str,),
},
'attribute_map': {
'site_name': 'siteName',
'site_name_list': 'siteNameList',
'site_group_name': 'siteGroupName',
},
'location_map': {
'site_name': 'query',
'site_name_list': 'query',
'site_group_name': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.nexus_insights_api_v1_reports_cloud_summary_get_endpoint = _Endpoint(
settings={
'response_type': (NexusInsightsApiV1ReportsCloudSummaryGet200Response,),
'auth': [],
'endpoint_path': '/nexus/insights/api/v1/reports/cloudSummary',
'operation_id': 'nexus_insights_api_v1_reports_cloud_summary_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'outlook_period',
'report_type',
'site_name',
'site_name_list',
'site_group_name',
],
'required': [
'outlook_period',
'report_type',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'outlook_period':
(int,),
'report_type':
(str,),
'site_name':
(str,),
'site_name_list':
(str,),
'site_group_name':
(str,),
},
'attribute_map': {
'outlook_period': 'outlookPeriod',
'report_type': 'reportType',
'site_name': 'siteName',
'site_name_list': 'siteNameList',
'site_group_name': 'siteGroupName',
},
'location_map': {
'outlook_period': 'query',
'report_type': 'query',
'site_name': 'query',
'site_name_list': 'query',
'site_group_name': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.nexus_insights_api_v1_reports_details_get_endpoint = _Endpoint(
settings={
'response_type': (NexusInsightsApiV1ReportsDetailsGet200Response,),
'auth': [],
'endpoint_path': '/nexus/insights/api/v1/reports/details',
'operation_id': 'nexus_insights_api_v1_reports_details_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'outlook',
'site_name',
'site_group_name',
'filter',
'include',
],
'required': [
'outlook',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'outlook':
(int,),
'site_name':
(str,),
'site_group_name':
(str,),
'filter':
(str,),
'include':
(str,),
},
'attribute_map': {
'outlook': 'outlook',
'site_name': 'siteName',
'site_group_name': 'siteGroupName',
'filter': 'filter',
'include': 'include',
},
'location_map': {
'outlook': 'query',
'site_name': 'query',
'site_group_name': 'query',
'filter': 'query',
'include': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.nexus_insights_api_v1_reports_inventory_get_endpoint = _Endpoint(
settings={
'response_type': (NexusInsightsApiV1ReportsInventoryGet200Response,),
'auth': [],
'endpoint_path': '/nexus/insights/api/v1/reports/inventory',
'operation_id': 'nexus_insights_api_v1_reports_inventory_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'site_name',
'site_group_name',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'site_name':
(str,),
'site_group_name':
(str,),
},
'attribute_map': {
'site_name': 'siteName',
'site_group_name': 'siteGroupName',
},
'location_map': {
'site_name': 'query',
'site_group_name': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.nexus_insights_api_v1_reports_summary_get_endpoint = _Endpoint(
settings={
'response_type': (NexusInsightsApiV1ReportsSummaryGet200Response,),
'auth': [],
'endpoint_path': '/nexus/insights/api/v1/reports/summary',
'operation_id': 'nexus_insights_api_v1_reports_summary_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'outlook_period',
'report_type',
'site_name',
'site_group_name',
],
'required': [
'outlook_period',
'report_type',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'outlook_period':
(int,),
'report_type':
(str,),
'site_name':
(str,),
'site_group_name':
(str,),
},
'attribute_map': {
'outlook_period': 'outlookPeriod',
'report_type': 'reportType',
'site_name': 'siteName',
'site_group_name': 'siteGroupName',
},
'location_map': {
'outlook_period': 'query',
'report_type': 'query',
'site_name': 'query',
'site_group_name': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
def nexus_insights_api_v1_reports_cloud_details_get(
self,
outlook,
**kwargs
):
"""Get conformance details of a reportType for a siteName over an outlookPeriod # noqa: E501
Get hardware, software or overall conformance details for a fabric over a projection period (from now to max. 18 months in the future). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.nexus_insights_api_v1_reports_cloud_details_get(outlook, async_req=True)
>>> result = thread.get()
Args:
outlook (int): Period over which the conformance details for the fabric are projected
Keyword Args:
site_name (str): Name of the Site - limit the records pertaining to the site. [optional]
site_name_list (str): List of site names. [optional]
site_group_name (str): Name of the Site Group - limit the records pertaining to the sites in this site group. [optional]
filter (str): Lucene format filter - Filter the response based on this filter field. [optional]
include (str): Device types to include in the conformance results. [optional]
offset (str): Pagination index into response. [optional]
count (str): Limits the number of entries in the response. [optional]
sort (str): Sort records in response by this field. [optional]
_return_http_data_only (bool): return the response data only, without
the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
_request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
NexusInsightsApiV1ReportsCloudDetailsGet200Response
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['outlook'] = \
outlook
return self.nexus_insights_api_v1_reports_cloud_details_get_endpoint.call_with_http_info(**kwargs)
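# Illustrative sketch (not part of the generated module): constructing the client
# and calling this endpoint synchronously. The host URL and the `Configuration`
# import follow the usual openapi-generator layout and are assumptions here.
#
#   import nexuscloud_client
#   configuration = nexuscloud_client.Configuration(host='https://example.invalid')
#   with nexuscloud_client.ApiClient(configuration) as api_client:
#       api = ReportsApi(api_client)
#       response = api.nexus_insights_api_v1_reports_cloud_details_get(outlook=6)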
def nexus_insights_api_v1_reports_cloud_inventory_get(
self,
**kwargs
):
"""Get Inventory reports for a siteName # noqa: E501
Get Inventory reports for the given siteName # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.nexus_insights_api_v1_reports_cloud_inventory_get(async_req=True)
>>> result = thread.get()
Keyword Args:
site_name (str): Name of the Site - limit the records pertaining to the site. [optional]
site_name_list (str): List of site names. [optional]
site_group_name (str): Name of the Site Group - limit the records pertaining to the sites in this site group. [optional]
_return_http_data_only (bool): return the response data only, without
the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
_request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
NexusInsightsApiV1ReportsCloudInventoryGet200Response
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.nexus_insights_api_v1_reports_cloud_inventory_get_endpoint.call_with_http_info(**kwargs)
def nexus_insights_api_v1_reports_cloud_summary_get(
self,
outlook_period,
report_type,
**kwargs
):
"""Get conformance summary of a reportType for a siteName over an outlookPeriod # noqa: E501
Get a hardware, software or overall conformance summary for a fabric over a projection period (from now to max. 18 months in the future). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.nexus_insights_api_v1_reports_cloud_summary_get(outlook_period, report_type, async_req=True)
>>> result = thread.get()
Args:
outlook_period (int): Period over which the conformance details for the fabric are projected
report_type (str): Type of report
Keyword Args:
site_name (str): Name of the Site - limit the records pertaining to the site. [optional]
site_name_list (str): List of site names. [optional]
site_group_name (str): Name of the Site Group - limit the records pertaining to the sites in this site group. [optional]
_return_http_data_only (bool): return the response data only, without
the status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
_request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
NexusInsightsApiV1ReportsCloudSummaryGet200Response
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['outlook_period'] = \
outlook_period
kwargs['report_type'] = \
report_type
return self.nexus_insights_api_v1_reports_cloud_summary_get_endpoint.call_with_http_info(**kwargs)
def nexus_insights_api_v1_reports_details_get(
self,
outlook,
**kwargs
):
"""Get conformance details of a reportType for a siteName over an outlookPeriod # noqa: E501
Get hardware, software, or overall conformance details for a fabric over a projection period (from now to max. 18 months in the future). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.nexus_insights_api_v1_reports_details_get(outlook, async_req=True)
>>> result = thread.get()
Args:
outlook (int): Period over which the conformance details for the fabric are projected
Keyword Args:
site_name (str): Name of the Site - limit the records pertaining to the site. [optional]
site_group_name (str): Name of the Site Group - limit the records pertaining to the sites in this site group. [optional]
filter (str): Lucene format filter - Filter the response based on this filter field. [optional]
include (str): Device types to include in the conformance results. [optional]
_return_http_data_only (bool): response data only, without status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
_request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
NexusInsightsApiV1ReportsDetailsGet200Response
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['outlook'] = \
outlook
return self.nexus_insights_api_v1_reports_details_get_endpoint.call_with_http_info(**kwargs)
def nexus_insights_api_v1_reports_inventory_get(
self,
**kwargs
):
"""Get Inventory reports for a siteName # noqa: E501
Get Inventory reports for the given siteName # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.nexus_insights_api_v1_reports_inventory_get(async_req=True)
>>> result = thread.get()
Keyword Args:
site_name (str): Name of the Site - limit the records pertaining to the site. [optional]
site_group_name (str): Name of the Site Group - limit the records pertaining to the sites in this site group. [optional]
_return_http_data_only (bool): response data only, without status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
_request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
NexusInsightsApiV1ReportsInventoryGet200Response
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.nexus_insights_api_v1_reports_inventory_get_endpoint.call_with_http_info(**kwargs)
def nexus_insights_api_v1_reports_summary_get(
self,
outlook_period,
report_type,
**kwargs
):
"""Get conformance summary of a reportType for a siteName over an outlookPeriod # noqa: E501
Get a hardware, software or overall conformance summary for a fabric over a projection period (from now to max. 18 months in the future). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.nexus_insights_api_v1_reports_summary_get(outlook_period, report_type, async_req=True)
>>> result = thread.get()
Args:
outlook_period (int): Period over which the conformance details for the fabric are projected
report_type (str): Type of report
Keyword Args:
site_name (str): Name of the Site - limit the records pertaining to the site. [optional]
site_group_name (str): Name of the Site Group - limit the records pertaining to the sites in this site group. [optional]
_return_http_data_only (bool): response data only, without status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
_request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
NexusInsightsApiV1ReportsSummaryGet200Response
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['outlook_period'] = \
outlook_period
kwargs['report_type'] = \
report_type
return self.nexus_insights_api_v1_reports_summary_get_endpoint.call_with_http_info(**kwargs)
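# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated client): the ApiClient wiring and the
# api class name are assumptions taken from the usual openapi-generator layout,
# and "hardware"/"site-1" are placeholder values.
#
#     api = SomeReportsApi(api_client)   # hypothetical instance of this class
#     summary = api.nexus_insights_api_v1_reports_summary_get(
#         6, "hardware", site_name="site-1")
#     thread = api.nexus_insights_api_v1_reports_summary_get(
#         6, "hardware", async_req=True)  # returns a thread
#     result = thread.get()
# ---------------------------------------------------------------------------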
|
PypiClean
|
/ai_flow-0.3.1.tar.gz/ai_flow-0.3.1/airflow/cli/simple_table.py
|
import inspect
import json
from typing import Any, Callable, Dict, List, Optional, Union
import yaml
from rich.box import ASCII_DOUBLE_HEAD
from rich.console import Console
from rich.syntax import Syntax
from rich.table import Table
from airflow.plugins_manager import PluginsDirectorySource
class AirflowConsole(Console):
"""Airflow rich console"""
def print_as_json(self, data: Dict):
"""Renders dict as json text representation"""
json_content = json.dumps(data)
self.print(Syntax(json_content, "json", theme="ansi_dark"), soft_wrap=True)
def print_as_yaml(self, data: Dict):
"""Renders dict as yaml text representation"""
yaml_content = yaml.dump(data)
self.print(Syntax(yaml_content, "yaml", theme="ansi_dark"), soft_wrap=True)
def print_as_table(self, data: List[Dict]):
"""Renders list of dictionaries as table"""
if not data:
self.print("No data found")
return
table = SimpleTable(
show_header=True,
)
for col in data[0].keys():
table.add_column(col)
for row in data:
table.add_row(*[str(d) for d in row.values()])
self.print(table)
# pylint: disable=too-many-return-statements
def _normalize_data(self, value: Any, output: str) -> Optional[Union[list, str, dict]]:
if isinstance(value, (tuple, list)):
if output == "table":
return ",".join(self._normalize_data(x, output) for x in value)
return [self._normalize_data(x, output) for x in value]
if isinstance(value, dict) and output != "table":
return {k: self._normalize_data(v, output) for k, v in value.items()}
if inspect.isclass(value) and not isinstance(value, PluginsDirectorySource):
return value.__name__
if value is None:
return None
return str(value)
def print_as(self, data: List[Union[Dict, Any]], output: str, mapper: Optional[Callable] = None):
"""Prints provided using format specified by output argument"""
output_to_renderer = {
"json": self.print_as_json,
"yaml": self.print_as_yaml,
"table": self.print_as_table,
}
renderer = output_to_renderer.get(output)
if not renderer:
raise ValueError(
f"Unknown formatter: {output}. Allowed options: {list(output_to_renderer.keys())}"
)
if not all(isinstance(d, dict) for d in data) and not mapper:
raise ValueError("To tabulate non-dictionary data you need to provide `mapper` function")
if mapper:
dict_data: List[Dict] = [mapper(d) for d in data]
else:
dict_data: List[Dict] = data
dict_data = [{k: self._normalize_data(v, output) for k, v in d.items()} for d in dict_data]
renderer(dict_data)
class SimpleTable(Table):
"""A rich Table with some default hardcoded for consistency."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.show_edge = kwargs.get("show_edge", False)
self.pad_edge = kwargs.get("pad_edge", False)
self.box = kwargs.get("box", ASCII_DOUBLE_HEAD)
self.show_header = kwargs.get("show_header", False)
self.title_style = kwargs.get("title_style", "bold green")
self.title_justify = kwargs.get("title_justify", "left")
self.caption = kwargs.get("caption", " ")
def add_column(self, *args, **kwargs) -> None: # pylint: disable=signature-differs
"""Add a column to the table. We use different default"""
kwargs["overflow"] = kwargs.get("overflow", None) # to avoid truncating
super().add_column(*args, **kwargs)
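# ---------------------------------------------------------------------------
# Usage sketch for AirflowConsole.print_as (the sample rows are made up):
#
#     console = AirflowConsole()
#     rows = [{"dag_id": "example_dag", "is_paused": False}]
#     console.print_as(data=rows, output="table")
#     console.print_as(data=rows, output="json")
#     # non-dict data needs a mapper that turns each item into a dict
#     console.print_as(data=["a", "b"], output="yaml", mapper=lambda x: {"value": x})
# ---------------------------------------------------------------------------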
|
PypiClean
|
/sdksio_verizon_apis_sdk-1.0.0-py3-none-any.whl/verizon/models/firmware_package.py
|
from verizon.api_helper import APIHelper
class FirmwarePackage(object):
"""Implementation of the 'FirmwarePackage' model.
Available firmware.
Attributes:
firmware_name (string): Firmware name.
firmware_from (string): Firmware from version.
firmware_to (string): Firmware to version.
launch_date (datetime): Firmware launch date.
release_note (string): Firmware release note.
model (string): Firmware applicable device model.
make (string): Firmware applicable device make.
protocol (CampaignMetaInfoProtocolEnum): Firmware protocol. Valid
values include: LWM2M, OMD-DM.
"""
# Create a mapping from Model property names to API property names
_names = {
"firmware_name": 'firmwareName',
"firmware_from": 'firmwareFrom',
"firmware_to": 'firmwareTo',
"launch_date": 'launchDate',
"release_note": 'releaseNote',
"model": 'model',
"make": 'make',
"protocol": 'protocol'
}
def __init__(self,
firmware_name=None,
firmware_from=None,
firmware_to=None,
launch_date=None,
release_note=None,
model=None,
make=None,
protocol='LWM2M'):
"""Constructor for the FirmwarePackage class"""
# Initialize members of the class
self.firmware_name = firmware_name
self.firmware_from = firmware_from
self.firmware_to = firmware_to
self.launch_date = APIHelper.RFC3339DateTime(launch_date) if launch_date else None
self.release_note = release_note
self.model = model
self.make = make
self.protocol = protocol
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
firmware_name = dictionary.get("firmwareName") if dictionary.get("firmwareName") else None
firmware_from = dictionary.get("firmwareFrom") if dictionary.get("firmwareFrom") else None
firmware_to = dictionary.get("firmwareTo") if dictionary.get("firmwareTo") else None
launch_date = APIHelper.RFC3339DateTime.from_value(dictionary.get("launchDate")).datetime if dictionary.get("launchDate") else None
release_note = dictionary.get("releaseNote") if dictionary.get("releaseNote") else None
model = dictionary.get("model") if dictionary.get("model") else None
make = dictionary.get("make") if dictionary.get("make") else None
protocol = dictionary.get("protocol") if dictionary.get("protocol") else 'LWM2M'
# Return an object of this model
return cls(firmware_name,
firmware_from,
firmware_to,
launch_date,
release_note,
model,
make,
protocol)
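# ---------------------------------------------------------------------------
# Usage sketch (illustrative placeholder values; launchDate is assumed to be an
# RFC3339 string, which is what APIHelper.RFC3339DateTime parses):
#
#     payload = {"firmwareName": "FOTA-1.2.3", "firmwareFrom": "1.2.2",
#                "firmwareTo": "1.2.3", "launchDate": "2021-06-01T00:00:00Z",
#                "model": "Model-X", "make": "Acme", "protocol": "LWM2M"}
#     package = FirmwarePackage.from_dictionary(payload)
#     print(package.firmware_name, package.protocol)
# ---------------------------------------------------------------------------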
|
PypiClean
|
/bmcs_cross_section-0.0.56a0.tar.gz/bmcs_cross_section-0.0.56a0/bmcs_cross_section/mxn/matresdev/db/exdb/loadtxt_bending.py
|
import os
from matresdev.db.simdb.simdb import simdb
import numpy as np
def dot2comma(value):
value = value.replace(',', '.')
return float(value)
file_name = os.path.join(simdb.exdata_dir, 'bending_tests',
'ZiE_2011-06-08_BT-12c-6cm-0-TU', 'BT-12c-6cm-0-Tu-V4.raw')
# file contains both loading- and unloading path:
#
#file_name = '/home/alexander/simdb/exdata/bending_tests/ZiE_2011-06-08_BT-12c-6cm-0-TU/BT-12c-6cm-0-Tu-V2-converted.csv'
def loadtxt_bending(file_name):
'''Return a data array of the bending test
- first column: displacement [mm]
- second column: compression strains at midfield [%]
- third column: load [N]
'''
try:
# Return a data array for the loading path (1 block).
# Load raw data in case of a loading path only
# (no additional unloading path recorded below the first data block in the file);
# in this case loadtxt works properly.
data_arr = np.loadtxt(file_name,
delimiter=';',
skiprows=41,
converters={
1: dot2comma, 2: dot2comma, 3: dot2comma},
usecols=[1, 2, 3])
print('loadtxt_bending: data_arr contains only loading path')
except IndexError:
print('loadtxt_bending: data_arr contains loading- and unloading path')
data_arr = loadtxt_2blocks(file_name)
return data_arr
def loadtxt_2blocks(file_name):
'''Return a data array consisting of the loading AND unloading path (merge 2 blocks in the data file).
In this case loadtxt doesn't work as the data file consists of 2 blocks.'''
file_ = open(file_name, 'r')
lines = file_.readlines()
data_arr_1 = np.zeros(3)
data_arr_2 = np.zeros(3)
start_n_blocks = []
end_n_blocks = []
# determine the starting number and end number of the data blocks 1 and 2:
#
n = 0
for line in lines:
line_split = line.split(';')
if line_split[0] == '"Probe"':
# first block normally starts with line 43
# the starting line of the second block needs to be determined
# 27 lines after the keyword "Probe" the data is recorded in both blocks
#
start_n_blocks.append(n + 28)
if line_split[0] == '"Probe"':
end_n_blocks.append(n)
n += 1
if len(end_n_blocks) != 1:
# add the line number of the last line
# this corresponds to the last line of block 2 if it is recorded
#
end_n_blocks.append(len(lines))
end_n_blocks = end_n_blocks[1:]
# print 'start_n_blocks', start_n_blocks
# print 'end_n_blocks', end_n_blocks
# convert data to array for blocks 1:
#
for line in lines[start_n_blocks[0]:end_n_blocks[0]]:
line_split = line.split(';')
line_arr = np.array([dot2comma(line_split[1]),
dot2comma(line_split[2]),
dot2comma(line_split[3])],
dtype=float)
data_arr_1 = np.vstack([data_arr_1, line_arr])
# convert data to array for blocks 2:
#
for line in lines[start_n_blocks[1]:end_n_blocks[1]]:
line_split = line.split(';')
line_arr = np.array([dot2comma(line_split[1]),
dot2comma(line_split[2]),
dot2comma(line_split[3])],
dtype=float)
data_arr_2 = np.vstack([data_arr_2, line_arr])
# remove line with zeros
#
data_arr = np.vstack([data_arr_1[1:], data_arr_2[1:]])
return data_arr
if __name__ == '__main__':
data_arr = loadtxt_bending(file_name)
print('data_arr', data_arr)
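# ---------------------------------------------------------------------------
# Column layout of the returned array (see the loadtxt_bending docstring); a
# small sketch of how the columns would typically be unpacked:
#
#     w = data_arr[:, 0]    # displacement [mm]
#     eps = data_arr[:, 1]  # compression strain at midfield [%]
#     F = data_arr[:, 2]    # load [N]
# ---------------------------------------------------------------------------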
|
PypiClean
|
/openc.excludesearch-0.1.1.tar.gz/openc.excludesearch-0.1.1/openc/excludesearch/README.txt
|
Introduction
============
openc.excludesearch is designed to make it possible for an administrator to easily mark content as hidden from searches.
NB: Requires dm's AdvancedQuery from http://www.dieter.handshake.de/pyprojects/zope/#AdvancedQuery
First off, we install the product itself::
>>> self.addProduct("openc.excludesearch")
We set up an object that will show in searches for "foo"::
>>> self.folder.invokeFactory('Document', 'foo', 'Foo Page')
'foo'
>>> self.folder['foo']
<ATDocument at .../foo>
>>> self.folder['foo'].setText("I am a foo object from foosville.")
We name it foo for convenience::
>>> foo = self.folder['foo']
We can see its text fine::
>>> foo.getText()
'<p>I am a foo object from foosville.</p>'
>>> foo.reindexObject()
If we search for it, we find it::
>>> app.plone.restrictedTraverse("queryCatalog")({"SearchableText":"foo"})
[<Products.ZCatalog.Catalog.mybrains object at ...>]
Once we activate the hiding functionality we don't::
>>> foo.getField('hidesearch').get(foo)
False
>>> foo.getField('hidesearch').set(foo, value=True)
>>> foo.reindexObject()
>>> foo.getField('hidesearch').get(foo)
True
>>> app.plone.restrictedTraverse("queryCatalog")({"SearchableText":"foo"})
[]
This is because it has a new marker interface::
>>> from openc.excludesearch.interfaces import IExcludeFromSearch
>>> IExcludeFromSearch(foo)
<ATDocument at .../foo>
If we remove this marker, we find it again::
>>> foo.getField('hidesearch').get(foo)
True
>>> foo.getField('hidesearch').set(foo, value=False)
>>> foo.reindexObject()
>>> foo.getField('hidesearch').get(foo)
False
>>> app.plone.restrictedTraverse("queryCatalog")({"SearchableText":"foo"})
[<Products.ZCatalog.Catalog.mybrains object at ...>]
As we no longer provide that interface::
>>> IExcludeFromSearch(foo)
Traceback (most recent call last):
...
TypeError: ('Could not adapt', <ATDocument at .../foo>, <InterfaceClass openc.excludesearch.interfaces.IExcludeFromSearch>)
Todo
====
1) Subclass/monkeypatch collections so they know to ignore things with the
IExcludeFromSearch marker interface
|
PypiClean
|
/curver-0.4.1.tar.gz/curver-0.4.1/docs/user/representations.rst
|
Representations
===============
Many of the data structures used to represent topological objects combinatorially within curver are based on (ideal) triangulations.
For example, :class:`~curver.kernel.lamination.Lamination`, :class:`~curver.kernel.homologyclass.HomologyClass` and :class:`~curver.kernel.encoding.MappingClass`.
But also some of the more specialist objects such as :class:`~curver.kernel.encoding.Encoding` and :class:`~curver.kernel.encoding.Mapping`.
Laminations
-----------
Laminations on a surface can be represented by their intersection numbers with the edges of a fixed triangulation.
There are 14 different types of laminations that can be drawn inside of a single triangle.
However, if the lamination is a :class:`~curver.kernel.curve.MultiCurve` then only four of the types (empty, corridor, bipod and wye) can possibly appear.
.. image:: ../figures/types.svg
:height: 300
:alt: The different types of lamination in a triangle.
:target: ../_images/types.svg
:align: center
Flips
-----
Curver also implements update rules for computing the effect of flipping the diagonal of a square for all 30 cases of laminations in a square [Bell15]_.
There are eight exceptional cases (D(bc), D(ad), A(cd), A(ab), N(cd), N(bc), N(ad) and N(ab)) where a non-standard update rule is required.
These are documented in the figure below.
All others follow the standard update rule :math:`\max(a + c, b + d) - e`.
.. image:: ../figures/flip.svg
:height: 400
:alt: The non-standard flip update rules.
:target: ../_images/flip.svg
:align: center
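The standard rule above can be checked directly on the six edge weights of a square.
The snippet below is a plain-Python sketch for illustration only, not curver's internal API::

    def standard_flip_weight(a, b, c, d, e):
        # New weight of the flipped diagonal in terms of the four sides
        # a, b, c, d and the old diagonal e.
        return max(a + c, b + d) - e

    # Example weights a=2, b=1, c=2, d=1, e=2 give a new diagonal weight of 2.
    assert standard_flip_weight(2, 1, 2, 1, 2) == 2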
Mapping Classes
---------------
Curver represents mapping classes via paths in the flip graph.
This graph has vertices corresponding to (ideal) triangulations of the surface (with ordered arcs) and edges whenever two differ by a flip.
The graph is quasi-isometric to the mapping class group.
Curver can automatically convert Dehn twists about curves into paths but currently has no way to reverse this process.
So that many of its algorithms can run in polynomial time, curver adds "short cuts" to the flip graph.
These additional edges correspond to powers of Dehn twists about curves.
If :math:`c` is a curve on :math:`\mathcal{T}` then there is an edge from :math:`\mathcal{T}` to :math:`D_c^n(\mathcal{T})` of length :math:`\log(n) + ||\mathcal{T}(c)||`.
|
PypiClean
|
/wmagent-2.2.4rc3.tar.gz/wmagent-2.2.4rc3/src/python/WMComponent/RucioInjector/Database/MySQL/GetCompletedBlocks.py
|
from WMCore.Database.DBFormatter import DBFormatter
class GetCompletedBlocks(DBFormatter):
"""
Retrieves a list of blocks that are closed but not yet known to be deletable:
- The workflows for all files in the block need to be completed (this relates only to the
workflows directly producing the files and does not track child workflow completion)
- The subscription made for the dataset is copy+delete
- A subscription has been made at the Data Management system
- The block hasn't been deleted yet
"""
sql = """SELECT dbsbuffer_block.blockname,
dbsbuffer_location.pnn,
dbsbuffer_dataset.path,
dbsbuffer_dataset_subscription.site,
dbsbuffer_workflow.name,
dbsbuffer_block.create_time
FROM dbsbuffer_dataset_subscription
INNER JOIN dbsbuffer_dataset ON
dbsbuffer_dataset.id = dbsbuffer_dataset_subscription.dataset_id
INNER JOIN dbsbuffer_block ON
dbsbuffer_block.dataset_id = dbsbuffer_dataset_subscription.dataset_id
INNER JOIN dbsbuffer_file ON
dbsbuffer_file.block_id = dbsbuffer_block.id
INNER JOIN dbsbuffer_workflow ON
dbsbuffer_workflow.id = dbsbuffer_file.workflow
INNER JOIN dbsbuffer_location ON
dbsbuffer_location.id = dbsbuffer_block.location
WHERE dbsbuffer_dataset_subscription.delete_blocks = 1
AND dbsbuffer_dataset_subscription.subscribed = 1
AND dbsbuffer_block.status = 'Closed'
AND dbsbuffer_block.deleted = 0
GROUP BY dbsbuffer_block.blockname,
dbsbuffer_location.pnn,
dbsbuffer_dataset.path,
dbsbuffer_dataset_subscription.site,
dbsbuffer_workflow.name,
dbsbuffer_block.create_time
"""
def format(self, result):
"""
_format_
Format the query results into the dictionary structure expected by the upper-layer Python code.
The input should be a list of database objects, each representing a line returned from the database,
with key names matching the column names from the sql query.
The result is a dictionary of dictionaries, one record per block returned from the database,
with key names mapped to the structures expected by the Python code. All workflows and sites are aggregated
into the same block record.
e.g.
{ '/Tau/Run2022C-PromptReco-v1/MINIAOD#2dd5a82b-873a-4403-8da1-6b943dac7081': {'blockCreateTime': 1659675842,
'blockName': '/Tau/Run2022C-PromptReco-v1/MINIAOD#2dd5a82b-873a-4403-8da1-6b943dac7081',
'dataset': '/Tau/Run2022C-PromptReco-v1/MINIAOD',
'location': 'T0_CH_CERN_Disk',
'sites': {'T1_ES_PIC_Disk',
'T1_ES_PIC_MSS'},
'workflowNames': {'PromptReco_Run356614_Tau'}},
'/Tau/Run2022C-PromptReco-v1/MINIAOD#f6bf5cc7-cab2-4572-8f30-574296bb109d': {'blockCreateTime': 1659723755,
'blockName': '/Tau/Run2022C-PromptReco-v1/MINIAOD#f6bf5cc7-cab2-4572-8f30-574296bb109d',
'dataset': '/Tau/Run2022C-PromptReco-v1/MINIAOD',
'location': 'T0_CH_CERN_Disk',
'sites': {'T1_ES_PIC_Disk',
'T1_ES_PIC_MSS'},
'workflowNames': {'PromptReco_Run356615_Tau',
'PromptReco_Run356619_Tau'}}
}
NOTE:
* location: Means where the output block has been created
* site(s): Means where the dataset gets a container-level rule
:param result: The result as returned by the mysql query execution.
:return: Dictionary of dictionaries, each one describing a block.
"""
# NOTE: We need to rename all the keys to follow the camelCase standard, and also to comply
# with the key names expected by the rest of the already existing python code
keyMap = {'blockname': 'blockName',
'name': 'workflowNames',
'pnn': 'location',
'site': 'sites',
'path': 'dataset',
'create_time': 'blockCreateTime'}
listResults = DBFormatter.formatDict(self, result)
dictResults = {}
for record in listResults:
# Populates results dict and adds all workflows and sites of the same block to a single record
blockName = record['blockname']
if blockName in dictResults:
dictResults[blockName]['workflowNames'].add(record['name'])
dictResults[blockName]['sites'].add(record['site'])
else:
for dbKey, pyKey in keyMap.items():
if dbKey == 'site' or dbKey == 'name':
data = record.pop(dbKey)
record[pyKey] = set()
record[pyKey].add(data)
else:
record[pyKey] = record.pop(dbKey)
dictResults[blockName] = record
return dictResults
def execute(self, conn=None, transaction=False, returnCursor=False):
"""
Executing the current sql query.
:param conn: A current database connection to be used if existing
:param transaction: A current database transaction to be used if existing
:return: A list of dictionaries one record for each database line returned
"""
results = self.dbi.processData(self.sql, conn=conn,
transaction=transaction)
return self.format(results)
|
PypiClean
|
/msgraph-sdk-1.0.0a3.tar.gz/msgraph-sdk-1.0.0a3/msgraph/generated/me/drives/item/list/content_types/item/base_types/base_types_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, Union
from ........models import content_type_collection_response
from ........models.o_data_errors import o_data_error
from .count import count_request_builder
class BaseTypesRequestBuilder():
"""
Provides operations to manage the baseTypes property of the microsoft.graph.contentType entity.
"""
def count(self) -> count_request_builder.CountRequestBuilder:
"""
Provides operations to count the resources in the collection.
"""
return count_request_builder.CountRequestBuilder(self.request_adapter, self.path_parameters)
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new BaseTypesRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/me/drives/{drive%2Did}/list/contentTypes/{contentType%2Did}/baseTypes{?%24top,%24skip,%24search,%24filter,%24count,%24orderby,%24select,%24expand}"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
def create_get_request_information(self,request_configuration: Optional[BaseTypesRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
"""
The collection of content types that are ancestors of this content type.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.GET
request_info.headers["Accept"] = "application/json"
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
request_info.add_request_options(request_configuration.options)
return request_info
async def get(self,request_configuration: Optional[BaseTypesRequestBuilderGetRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> Optional[content_type_collection_response.ContentTypeCollectionResponse]:
"""
The collection of content types that are ancestors of this content type.
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
responseHandler: Response handler to use in place of the default response handling provided by the core service
Returns: Optional[content_type_collection_response.ContentTypeCollectionResponse]
"""
request_info = self.create_get_request_information(
request_configuration
)
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_async(request_info, content_type_collection_response.ContentTypeCollectionResponse, response_handler, error_mapping)
@dataclass
class BaseTypesRequestBuilderGetQueryParameters():
"""
The collection of content types that are ancestors of this content type.
"""
# Include count of items
count: Optional[bool] = None
# Expand related entities
expand: Optional[List[str]] = None
# Filter items by property values
filter: Optional[str] = None
# Order items by property values
orderby: Optional[List[str]] = None
# Search items by search phrases
search: Optional[str] = None
# Select properties to be returned
select: Optional[List[str]] = None
# Skip the first n items
skip: Optional[int] = None
# Show only the first n items
top: Optional[int] = None
def get_query_parameter(self,original_name: Optional[str] = None) -> str:
"""
Maps the query parameters names to their encoded names for the URI template parsing.
Args:
originalName: The original query parameter name in the class.
Returns: str
"""
if original_name is None:
raise Exception("original_name cannot be undefined")
if original_name == "count":
return "%24count"
if original_name == "expand":
return "%24expand"
if original_name == "filter":
return "%24filter"
if original_name == "orderby":
return "%24orderby"
if original_name == "search":
return "%24search"
if original_name == "select":
return "%24select"
if original_name == "skip":
return "%24skip"
if original_name == "top":
return "%24top"
return original_name
@dataclass
class BaseTypesRequestBuilderGetRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, str]] = None
# Request options
options: Optional[List[RequestOption]] = None
# Request query parameters
query_parameters: Optional[BaseTypesRequestBuilder.BaseTypesRequestBuilderGetQueryParameters] = None
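# ---------------------------------------------------------------------------
# Usage sketch (request_adapter and the ids are placeholders; in practice the
# adapter comes from the authenticated Graph client setup):
#
#     import asyncio
#     builder = BaseTypesRequestBuilder(request_adapter, {
#         "drive%2Did": "<drive-id>", "contentType%2Did": "<content-type-id>"})
#     query = BaseTypesRequestBuilder.BaseTypesRequestBuilderGetQueryParameters(
#         top=5, select=["name"])
#     config = BaseTypesRequestBuilder.BaseTypesRequestBuilderGetRequestConfiguration(
#         query_parameters=query)
#     response = asyncio.run(builder.get(request_configuration=config))
# ---------------------------------------------------------------------------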
|
PypiClean
|
/open_aea-1.38.0-py3-none-win32.whl/aea/cli/eject.py
|
"""Implementation of the 'aea eject' subcommand."""
import shutil
from pathlib import Path
from typing import cast
import click
from packaging.version import Version
import aea
from aea.cli.fingerprint import fingerprint_item
from aea.cli.remove import ItemRemoveHelper
from aea.cli.utils.click_utils import PublicIdParameter
from aea.cli.utils.config import (
load_item_config,
set_cli_author,
try_to_load_agent_config,
update_item_config,
)
from aea.cli.utils.context import Context
from aea.cli.utils.decorators import check_aea_project, clean_after, pass_ctx
from aea.cli.utils.package_utils import (
copy_package_directory,
create_symlink_packages_to_vendor,
create_symlink_vendor_to_local,
fingerprint_all,
get_package_path,
is_item_present,
replace_all_import_statements,
update_item_public_id_in_init,
update_references,
)
from aea.configurations.base import (
ComponentId,
ComponentType,
PackageId,
PackageType,
PublicId,
)
from aea.configurations.constants import (
CONNECTION,
CONTRACT,
DEFAULT_VERSION,
PROTOCOL,
SKILL,
)
from aea.configurations.utils import get_latest_component_id_from_prefix
from aea.helpers.base import (
compute_specifier_from_version,
find_topological_order,
reachable_nodes,
)
from aea.helpers.ipfs.base import IPFSHashOnly
@click.group()
@click.option(
"--with-symlinks",
is_flag=True,
help="Add symlinks from vendor to non-vendor and packages to vendor folders.",
)
@click.option(
"-q",
"--quiet",
"quiet",
is_flag=True,
required=False,
default=False,
help="If provided, the command will not ask the user for confirmation.",
)
@click.pass_context
@check_aea_project
def eject(click_context: click.core.Context, quiet: bool, with_symlinks: bool) -> None:
"""Eject a vendor package of the agent."""
click_context.obj.set_config("quiet", quiet)
click_context.obj.set_config("with_symlinks", with_symlinks)
set_cli_author(click_context)
@eject.command()
@click.argument("public_id", type=PublicIdParameter(), required=True)
@pass_ctx
def connection(ctx: Context, public_id: PublicId) -> None:
"""Eject a vendor connection."""
quiet = ctx.config.get("quiet")
with_symlinks = ctx.config.get("with_symlinks")
_eject_item(ctx, CONNECTION, public_id, quiet=quiet, with_symlinks=with_symlinks)
@eject.command()
@click.argument("public_id", type=PublicIdParameter(), required=True)
@pass_ctx
def contract(ctx: Context, public_id: PublicId) -> None:
"""Eject a vendor contract."""
quiet = ctx.config.get("quiet")
with_symlinks = ctx.config.get("with_symlinks")
_eject_item(ctx, CONTRACT, public_id, quiet=quiet, with_symlinks=with_symlinks)
@eject.command()
@click.argument("public_id", type=PublicIdParameter(), required=True)
@pass_ctx
def protocol(ctx: Context, public_id: PublicId) -> None:
"""Eject a vendor protocol."""
quiet = ctx.config.get("quiet")
with_symlinks = ctx.config.get("with_symlinks")
_eject_item(ctx, PROTOCOL, public_id, quiet=quiet, with_symlinks=with_symlinks)
@eject.command()
@click.argument("public_id", type=PublicIdParameter(), required=True)
@pass_ctx
def skill(ctx: Context, public_id: PublicId) -> None:
"""Eject a vendor skill."""
quiet = ctx.config.get("quiet")
with_symlinks = ctx.config.get("with_symlinks")
_eject_item(ctx, SKILL, public_id, quiet=quiet, with_symlinks=with_symlinks)
@clean_after
def _eject_item(
ctx: Context,
item_type: str,
public_id: PublicId,
quiet: bool = True,
with_symlinks: bool = False,
) -> None:
"""
Eject item from installed (vendor) to custom folder.
:param ctx: context object.
:param item_type: item type.
:param public_id: item public ID.
:param quiet: if false, the function will ask the user in case of recursive eject.
:param with_symlinks: if eject should create symlinks.
:raises ClickException: if item is absent at source path or present at destination path.
"""
# we know cli_author is set because of the above checks.
cli_author: str = cast(str, ctx.config.get("cli_author"))
item_type_plural = item_type + "s"
if not is_item_present(
ctx.cwd,
ctx.agent_config,
item_type,
public_id,
is_vendor=True,
with_version=True,
): # pragma: no cover
raise click.ClickException(
f"{item_type.title()} {public_id} not found in agent's vendor items."
)
src = get_package_path(ctx.cwd, item_type, public_id)
dst = get_package_path(ctx.cwd, item_type, public_id, is_vendor=False)
if is_item_present(
ctx.cwd, ctx.agent_config, item_type, public_id, is_vendor=False
): # pragma: no cover
raise click.ClickException(
f"{item_type.title()} {public_id} is already a non-vendor package."
)
configuration = load_item_config(item_type, Path(src))
# get 'concrete' public id, in case it is 'latest'
component_prefix = ComponentType(item_type), public_id.author, public_id.name
component_id = get_latest_component_id_from_prefix(
ctx.agent_config, component_prefix
)
# component id is necessarily found, due to the checks above.
public_id = cast(ComponentId, component_id).public_id
package_id = PackageId(PackageType(item_type), public_id)
click.echo(
f"Ejecting item {package_id.package_type.value} {str(package_id.public_id)}"
)
# first, eject all the vendor packages that depend on this
item_remover = ItemRemoveHelper(ctx, ignore_non_vendor=True)
reverse_dependencies = (
item_remover.get_agent_dependencies_with_reverse_dependencies()
)
reverse_reachable_dependencies = reachable_nodes(
reverse_dependencies, {package_id.without_hash()}
)
# the reversed topological order of a graph
# is the topological order of the reverse graph.
eject_order = list(reversed(find_topological_order(reverse_reachable_dependencies)))
eject_order.remove(package_id)
if len(eject_order) > 0 and not quiet:
click.echo(f"The following vendor packages will be ejected: {eject_order}")
answer = click.confirm("Do you want to proceed?")
if not answer:
click.echo("Aborted.")
return
for dependency_package_id in eject_order:
# 'dependency_package_id' depends on 'package_id',
# so we need to eject it first
_eject_item(
ctx,
dependency_package_id.package_type.value,
dependency_package_id.public_id,
quiet=True,
)
# copy the vendor package into the non-vendor packages
ctx.clean_paths.append(dst)
copy_package_directory(Path(src), dst)
new_public_id = PublicId(cli_author, public_id.name, DEFAULT_VERSION)
current_version = Version(aea.__version__)
new_aea_range = (
configuration.aea_version
if configuration.aea_version_specifiers.contains(current_version)
else compute_specifier_from_version(current_version)
)
item_config_update = dict(
author=new_public_id.author,
version=new_public_id.version,
aea_version=new_aea_range,
)
update_item_config(item_type, Path(dst), None, **item_config_update)
update_item_public_id_in_init(item_type, Path(dst), new_public_id)
shutil.rmtree(src)
replace_all_import_statements(
Path(ctx.cwd), ComponentType(item_type), public_id, new_public_id
)
fingerprint_item(ctx, item_type, new_public_id)
package_hash = IPFSHashOnly.hash_directory(dst)
public_id_with_hash = PublicId(
new_public_id.author, new_public_id.name, new_public_id.version, package_hash
)
# update references in all the other packages
component_type = ComponentType(item_type_plural[:-1])
old_component_id = ComponentId(component_type, public_id)
new_component_id = ComponentId(component_type, public_id_with_hash)
update_references(ctx, {old_component_id: new_component_id})
# need to reload agent configuration with the updated references
try_to_load_agent_config(ctx)
# replace import statements in all the non-vendor packages
# fingerprint all (non-vendor) packages
fingerprint_all(ctx)
if with_symlinks:
click.echo(
"Adding symlinks from vendor to non-vendor and packages to vendor folders."
)
create_symlink_vendor_to_local(ctx, item_type, new_public_id)
create_symlink_packages_to_vendor(ctx)
click.echo(
f"Successfully ejected {item_type} {public_id} to {dst} as {public_id_with_hash}."
)
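# ---------------------------------------------------------------------------
# CLI usage sketch (run from inside an agent project; the public ids below are
# placeholders):
#
#     aea eject skill some_author/some_skill:0.1.0
#     aea eject --with-symlinks connection some_author/some_connection:0.1.0
#     aea eject -q protocol some_author/some_protocol:0.1.0   # skip confirmation
# ---------------------------------------------------------------------------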
|
PypiClean
|
/dagster-toolbox-0.0.5.tar.gz/dagster-toolbox-0.0.5/dagster_toolbox/functions/dataframe.py
|
from datetime import datetime
def cast_dataframe(dataframe, columns):
for column_name, column_type in columns:
if column_type == "string":
dataframe[column_name] = dataframe[column_name].astype(
"str", errors="ignore"
)
elif column_type == "int":
dataframe[column_name] = dataframe[column_name].astype(
"int", errors="ignore"
)
elif column_type == "float":
dataframe[column_name] = dataframe[column_name].astype(
"float", errors="ignore"
)
elif column_type == "bool":
dataframe[column_name] = dataframe[column_name].astype(
"bool", errors="ignore"
)
elif column_type == "datetime":
dataframe[column_name] = dataframe[column_name].astype(
"datetime", errors="ignore"
)
elif column_type == "timedelta":
dataframe[column_name] = dataframe[column_name].astype(
"timedelta", errors="ignore"
)
elif column_type == "category":
dataframe[column_name] = dataframe[column_name].astype(
"category", errors="ignore"
)
else:
dataframe[column_name] = dataframe[column_name].astype(
"str", errors="ignore"
)
return dataframe
def get_date(value):
try:
return datetime.strptime(value, "%d/%m/%Y")
except ValueError:
return value
def get_float_to_string(value):
try:
return str(int(float(value)))
except ValueError:
return value
def get_float_from_string(value):
try:
return float(value)
except ValueError:
return value
def get_int_from_string(value):
try:
return int(value)
except ValueError:
return value
def get_string_from_float(value):
try:
return str(int(float(value)))
except ValueError:
return value
def get_human_input_removed(value):
if value == "...":
return ""
return value
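# ---------------------------------------------------------------------------
# Usage sketch (pandas is an assumption here; the helpers only rely on the
# .astype()/str interfaces):
#
#     import pandas as pd
#     df = pd.DataFrame({"id": ["1", "2"], "price": ["3.5", "4.0"],
#                        "day": ["01/02/2021", "..."]})
#     df = cast_dataframe(df, [("id", "int"), ("price", "float")])
#     df["day"] = df["day"].apply(get_human_input_removed).apply(get_date)
# ---------------------------------------------------------------------------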
|
PypiClean
|
/mroylib_min-2.2.5.tar.gz/mroylib_min-2.2.5/qlib/net/__requester.py
|
import random
from functools import wraps
from importlib import import_module
from urllib.parse import urlencode
import requests
from .config import RAW_HEADERS
from .agents import AGS, AG_NUM
def random_choice(lst, num=None):
if not num:
num = len(lst)
ix = random.randint(0, num-1)
return lst[ix]
# _methods = {}
def parameters(**pars):
return urlencode(pars)
def set_setssion():
return requests.session()
def session(url):
def _wrap(func):
session = requests.Session()
@wraps(func)
def run(*args, **kargs):
session.get(url)
kargs['session'] = session
return func(*args, **kargs)
return run
return _wrap
def to(url, data=None, ssl=False, method='get',
proxy=None,
cookie=False,
agent=False,
parser=None,
**option):
"""
@cookie [bool]
if True, use a requests Session so cookies are kept; returns (session, response)
@proxy
proxy={
'https': 'socks5://127.0.0.1:1080',
'http': 'socks5://127.0.0.1:1080'
}
...
proxy='socks5://127.0.0.1:1080'
@ssl [bool]
if True, a bare url like 'www.xxx.xxx' is prefixed with 'https://' (otherwise 'http://')
@data [dict]
post's payload
@agent [bool/str]
if True:
will use a random agent picked from the built-in agent list
if str:
will set User-Agent to the given string directly
@parser [str/None] 'bs4/lxml utf8/gbk'
parser library to import ('bs4'/'bs'/'lxml'), optionally followed by an encoding;
on HTTP 200 the decoded body is returned parsed instead of the raw response
@options:
@headers [dict] extra headers merged into the defaults
"""
User_Agent = None
session = None
parserlib = None
encoding = 'utf-8'
if agent == True:
User_Agent = random_choice(AGS, AG_NUM)
elif isinstance(agent, str):
User_Agent = agent
if parser in ("lxml", "bs", "bs4",):
if parser.startswith("b"):
parserlib = getattr(import_module("bs4"), 'BeautifulSoup')
else:
parserlib = getattr(import_module("lxml.etree"), 'HTML')
if len(parser.split()) ==2:
encoding = parser.split().pop()
if not url.startswith("http"):
if ssl:
url = 'https://' + url
else:
url = 'http://' + url
headers = RAW_HEADERS
headers['User-Agent'] = User_Agent
if 'headers' in option:
for k in option['headers']:
headers[k] = option['headers'][k]
if cookie:
session = requests.Session()
m = getattr(session, method)
else:
m = getattr(requests, method)
if 'session' in option:
m = getattr(option['session'], method)
if proxy:
if isinstance(proxy, dict):
pass
elif isinstance(proxy, str):
proxy = {
'http': proxy,
'https': proxy,
}
res = m(url, data=data, headers=headers, proxies=proxy)
else:
res = m(url, data=data, headers=headers)
if parserlib:
if res.status_code == 200:
res = parserlib(res.content.decode(encoding, "ignore"), "html.parser")
if cookie:
return session, res
return res
def network(url, data=None, method="get", **options):
if "session" in options:
m = getattr(options['session'], method)
else:
m = getattr(requests, method)
if not data:
req = m(url, headers=RAW_HEADERS)
else:
req = m(url, data=data, headers=RAW_HEADERS)
def _wrap(func):
func.res = req
@wraps(func)
def __call_back(*args, **kargs):
return func(*args, **kargs)
return __call_back
return _wrap
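# ---------------------------------------------------------------------------
# Usage sketch for `to` (the url is a placeholder):
#
#     res = to("example.com", ssl=True, agent=True)        # requests.Response
#     soup = to("example.com", ssl=True, parser="bs4")     # parsed body on HTTP 200
#     session, res = to("example.com", cookie=True)        # keep cookies in a Session
# ---------------------------------------------------------------------------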
|
PypiClean
|
/django_mazer-0.0.18-py3-none-any.whl/mazer/static/assets/vendors/tinymce/plugins/spellchecker/plugin.min.js
|
!function(){"use strict";var N=function(e){var t=e;return{get:function(){return t},set:function(e){t=e}}},e=tinymce.util.Tools.resolve("tinymce.PluginManager"),l=Object.hasOwnProperty,g=tinymce.util.Tools.resolve("tinymce.util.Tools"),d=tinymce.util.Tools.resolve("tinymce.util.URI"),f=tinymce.util.Tools.resolve("tinymce.util.XHR"),h=function(e){return e.getParam("spellchecker_rpc_url")},k=function(e){var t=new RegExp('[^\\s!"#$%&()*+,-./:;<=>?@[\\]^_{|}`\xa7\xa9\xab\xae\xb1\xb6\xb7\xb8\xbb\xbc\xbd\xbe\xbf\xd7\xf7\xa4\u201d\u201c\u201e\xa0\u2002\u2003\u2009]+',"g");return e.getParam("spellchecker_wordchar_pattern",t)};function p(e){return e&&1===e.nodeType&&"false"===e.contentEditable}var r=function(i,r){var n,a=[],v=r.dom,f=r.schema.getBlockElements(),h=r.schema.getWhiteSpaceElements(),g=r.schema.getShortEndedElements();function o(e){var t=i.getElementsByTagName("*"),n=[];e="number"==typeof e?""+e:null;for(var r=0;r<t.length;r++){var o=t[r],a=o.getAttribute("data-mce-index");null!==a&&a.length&&-1!==o.className.indexOf("mce-spellchecker-word")&&(a!==e&&null!==e||n.push(o))}return n}function c(e){for(var t=a.length;t--;)if(a[t]===e)return t;return-1}function e(e){for(var t=0,n=a.length;t<n&&!1!==e(a[t],t);t++);return this}function t(e){for(var t=o(e?c(e):null),n=t.length;n--;)!function(e){for(var t=e.parentNode;0<e.childNodes.length;)t.insertBefore(e.childNodes[0],e);t.removeChild(e)}(t[n]);return this}function s(e){var t=o(c(e)),n=r.dom.createRng();return n.setStartBefore(t[0]),n.setEndAfter(t[t.length-1]),n}var l=function u(e){var t;if(3===e.nodeType)return e.data;if(h[e.nodeName]&&!f[e.nodeName])return"";if(p(e))return"\n";if(t="",(f[e.nodeName]||g[e.nodeName])&&(t+="\n"),e=e.firstChild)for(;t+=u(e),e=e.nextSibling;);return t}(i);return{text:l,matches:a,each:e,filter:function(n){var r=[];return e(function(e,t){n(e,t)&&r.push(e)}),a=r,this},reset:function(){return a.splice(0,a.length),t(),this},matchFromElement:function(e){return a[e.getAttribute("data-mce-index")]},elementFromMatch:function(e){return o(c(e))[0]},find:function(e,t){if(l&&e.global)for(;n=e.exec(l);)a.push(function(e,t){if(!e[0])throw new Error("findAndReplaceDOMText cannot handle zero-length matches");return{start:e.index,end:e.index+e[0].length,text:e[0],data:t}}(n,t));return this},add:function(e,t,n){return a.push({start:e,end:e+t,text:l.substr(e,t),data:n}),this},wrap:function(e){function m(e,t){var n=a[t];n.stencil||(n.stencil=o(n));var r=n.stencil.cloneNode(!1);return r.setAttribute("data-mce-index",t),e&&r.appendChild(v.doc.createTextNode(e)),r}var o;return a.length&&function(e,t,n){var r,o,a,i,c,s=[],l=0,u=e,d=0;(t=t.slice(0)).sort(function(e,t){return e.start-t.start}),c=t.shift();e:for(;;){if((f[u.nodeName]||g[u.nodeName]||p(u))&&l++,3===u.nodeType&&(!o&&u.length+l>=c.end?(o=u,i=c.end-l):r&&s.push(u),!r&&u.length+l>c.start&&(r=u,a=c.start-l),l+=u.length),r&&o){if(u=n({startNode:r,startNodeIndex:a,endNode:o,endNodeIndex:i,innerNodes:s,match:c.text,matchIndex:d}),l-=o.length-i,o=r=null,s=[],d++,!(c=t.shift()))break}else if(h[u.nodeName]&&!f[u.nodeName]||!u.firstChild){if(u.nextSibling){u=u.nextSibling;continue}}else if(!p(u)){u=u.firstChild;continue}for(;;){if(u.nextSibling){u=u.nextSibling;break}if(u.parentNode===e)break e;u=u.parentNode}}}(i,a,(o=e,function(e){var t,n,r=e.startNode,o=e.endNode,a=e.matchIndex,i=v.doc;if(r===o){var c=r,s=c.parentNode;0<e.startNodeIndex&&(t=i.createTextNode(c.data.substring(0,e.startNodeIndex)),s.insertBefore(t,c));var l=m(e.match,a);return 
s.insertBefore(l,c),e.endNodeIndex<c.length&&(n=i.createTextNode(c.data.substring(e.endNodeIndex)),s.insertBefore(n,c)),c.parentNode.removeChild(c),l}t=i.createTextNode(r.data.substring(0,e.startNodeIndex)),n=i.createTextNode(o.data.substring(e.endNodeIndex));for(var u=m(r.data.substring(e.startNodeIndex),a),d=0,f=e.innerNodes.length;d<f;++d){var h=e.innerNodes[d],g=m(h.data,a);h.parentNode.replaceChild(g,h)}var p=m(o.data.substring(0,e.endNodeIndex),a);return(s=r.parentNode).insertBefore(t,r),s.insertBefore(u,r),s.removeChild(r),(s=o.parentNode).insertBefore(p,o),s.insertBefore(n,o),s.removeChild(o),p})),this},unwrap:t,replace:function(e,t){var n=s(e);return n.deleteContents(),0<t.length&&n.insertNode(r.dom.doc.createTextNode(t)),n},rangeFromMatch:s,indexOf:c}},u=function(e,t){var n;return t.get()||(n=r(e.getBody(),e),t.set(n)),t.get()},m=function(e,t,n,r,o,a,i){var c,s,l,u=e.getParam("spellchecker_callback");(u||(c=e,s=t,l=n,function(e,t,r,o){var n={method:e,lang:l.get()},a="";n["addToDictionary"===e?"word":"text"]=t,g.each(n,function(e,t){a&&(a+="&"),a+=t+"="+encodeURIComponent(e)}),f.send({url:new d(s).toAbsolute(h(c)),type:"post",content_type:"application/x-www-form-urlencoded",data:a,success:function(e){var t,n=JSON.parse(e);n?n.error?o(n.error):r(n):(t=c.translate("Server response wasn't proper JSON."),o(t))},error:function(){var e=c.translate("The spelling service was not found: (")+h(c)+c.translate(")");o(e)}})})).call(e.plugins.spellchecker,r,o,a,i)},y=function(t,e,n,r,o,a){i(t,n,r)||(t.setProgressState(!0),m(t,e,a,"spellcheck",u(t,r).text,function(e){S(t,n,r,o,e)},function(e){t.notificationManager.open({text:e,type:"error"}),t.setProgressState(!1),i(t,n,r)}),t.focus())},v=function(e,t,n){e.dom.select("span.mce-spellchecker-word").length||i(e,t,n)},o=function(t,e,n,r,o,a){t.selection.collapse(),a?g.each(t.dom.select("span.mce-spellchecker-word"),function(e){e.getAttribute("data-mce-word")===r&&t.dom.remove(e,!0)}):t.dom.remove(o,!0),v(t,e,n)},i=function(e,t,n){var r=e.selection.getBookmark();if(u(e,n).reset(),e.selection.moveToBookmark(r),n.set(null),t.get())return t.set(!1),e.fire("SpellcheckEnd"),!0},x=function(e){var t=e.getAttribute("data-mce-index");return"number"==typeof t?""+t:t},S=function(t,e,n,r,o){var a=!!o.dictionary,i=o.words;if(t.setProgressState(!1),function(e){for(var t in e)if(l.call(e,t))return!1;return!0}(i)){var c=t.translate("No misspellings found.");return t.notificationManager.open({text:c,type:"info"}),void e.set(!1)}r.set({suggestions:i,hasDictionarySupport:a});var s=t.selection.getBookmark();u(t,n).find(k(t)).filter(function(e){return!!i[e.text]}).wrap(function(e){return t.dom.create("span",{"class":"mce-spellchecker-word","aria-invalid":"spelling","data-mce-bogus":1,"data-mce-word":e.text})}),t.selection.moveToBookmark(s),e.set(!0),t.fire("SpellcheckStart")},b=function(){return(b=Object.assign||function(e){for(var t,n=1,r=arguments.length;n<r;n++)for(var o in t=arguments[n])Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o]);return e}).apply(this,arguments)},w="SpellcheckStart SpellcheckEnd",T=function(n,e,r,t,o,a){var i,c,s,l=(s=n,i=g.map(s.getParam("spellchecker_languages","English=en,Danish=da,Dutch=nl,Finnish=fi,French=fr_FR,German=de,Italian=it,Polish=pl,Portuguese=pt_BR,Spanish=es,Swedish=sv").split(","),function(e){var 
t=e.split("=");return{name:t[0],value:t[1]}}),c=[],g.each(i,function(e){c.push({selectable:!0,text:e.name,data:e.value})}),c),u=function(){y(n,e,r,t,a,o)},d={tooltip:"Spellcheck",onAction:u,icon:"spell-check",onSetup:function(e){var t=function(){e.setActive(r.get())};return n.on(w,t),function(){n.off(w,t)}}},f=b(b({},d),{type:"splitbutton",select:function(e){return e===o.get()},fetch:function(e){e(g.map(l,function(e){return{type:"choiceitem",value:e.data,text:e.text}}))},onItemAction:function(e,t){o.set(t)}});1<l.length?n.ui.registry.addSplitButton("spellchecker",f):n.ui.registry.addToggleButton("spellchecker",d),n.ui.registry.addToggleMenuItem("spellchecker",{text:"Spellcheck",icon:"spell-check",onSetup:function(e){e.setActive(r.get());var t=function(){e.setActive(r.get())};return n.on(w,t),function(){n.off(w,t)}},onAction:u})},A=function(c,s,e,l,u,d,f,h){var t=[],n=e.get().suggestions[f];return g.each(n,function(e){t.push({text:e,onAction:function(){c.insertContent(c.dom.encode(e)),c.dom.remove(h),v(c,l,u)}})}),e.get().hasDictionarySupport&&(t.push({type:"separator"}),t.push({text:"Add to dictionary",onAction:function(){var t,e,n,r,o,a,i;e=s,n=l,r=u,o=d,a=f,i=h,(t=c).setProgressState(!0),m(t,e,o,"addToDictionary",a,function(){t.setProgressState(!1),t.dom.remove(i,!0),v(t,n,r)},function(e){t.notificationManager.open({text:e,type:"error"}),t.setProgressState(!1)})}})),t.push.apply(t,[{type:"separator"},{text:"Ignore",onAction:function(){o(c,l,u,f,h)}},{text:"Ignore all",onAction:function(){o(c,l,u,f,h,!0)}}]),t},B=function(o,a,i,c,s,l){o.ui.registry.addContextMenu("spellchecker",{update:function(e){var t=e;if("mce-spellchecker-word"!==t.className)return[];var n=function(e,t){var n=[],r=g.toArray(e.getBody().getElementsByTagName("span"));if(r.length)for(var o=0;o<r.length;o++){var a=x(r[o]);null!==a&&a.length&&a===t.toString()&&n.push(r[o])}return n}(o,x(t));if(0<n.length){var r=o.dom.createRng();return r.setStartBefore(n[0]),r.setEndAfter(n[n.length-1]),o.selection.setRng(r),A(o,a,i,c,s,l,t.getAttribute("data-mce-word"),n)}}})};e.add("spellchecker",function(e,t){if(!1==(!!e.hasPlugin("tinymcespellchecker",!0)&&("undefined"!=typeof window.console&&window.console.log&&window.console.log("Spell Checker Pro is incompatible with Spell Checker plugin! Remove 'spellchecker' from the 'plugins' option."),!0))){var n=N(!1),r=N((x=(v=e).getParam("language","en"),v.getParam("spellchecker_language",x))),o=N(null),a=N(null);return T(e,t,n,o,r,a),B(e,t,a,n,o,r),f=t,h=n,g=o,p=a,m=r,(d=e).addCommand("mceSpellCheck",function(){y(d,f,h,g,p,m)}),i=e,c=n,s=a,l=o,u=r,{getTextMatcher:function(){return l.get()},getWordCharPattern:function(){return k(i)},markErrors:function(e){S(i,c,l,s,e)},getLanguage:function(){return u.get()}}}var i,c,s,l,u,d,f,h,g,p,m,v,x})}();
|
PypiClean
|
/larcv-3.5.0.tar.gz/larcv-3.5.0/src/json/doc/mkdocs/docs/api/basic_json/get_ref.md
|
# <small>nlohmann::basic_json::</small>get_ref
```cpp
template<typename ReferenceType>
ReferenceType get_ref();
template<typename ReferenceType>
const ReferenceType get_ref() const;
```
Implicit reference access to the internally stored JSON value. No copies are made.
## Template parameters
`ReferenceType`
: reference type; must be a reference to [`array_t`](array_t.md), [`object_t`](object_t.md),
[`string_t`](string_t.md), [`boolean_t`](boolean_t.md), [`number_integer_t`](number_integer_t.md),
[`number_unsigned_t`](number_unsigned_t.md), [`number_float_t`](number_float_t.md), or [`binary_t`](binary_t.md).
Enforced by static assertion.
## Return value
reference to the internally stored JSON value if the requested reference type fits to the JSON value; throws
[`type_error.303`](../../home/exceptions.md#jsonexceptiontype_error303) otherwise
## Exception safety
Strong exception safety: if an exception occurs, the original value stays intact.
## Exceptions
Throws [`type_error.303`](../../home/exceptions.md#jsonexceptiontype_error303) if the requested reference type does not
match the stored JSON value type; example: `"incompatible ReferenceType for get_ref, actual type is binary"`.
## Complexity
Constant.
## Notes
!!! warning
Writing data to the referee of the result yields an undefined state.
## Examples
??? example
The example shows several calls to `get_ref()`.
```cpp
--8<-- "examples/get_ref.cpp"
```
Output:
```json
--8<-- "examples/get_ref.output"
```
## Version history
- Added in version 1.1.0.
- Extended to binary types in version 3.8.0.
/starlink-pywrapper-0.3.tar.gz/starlink-pywrapper-0.3/starlink/kappa_help/paldef.rst
PALDEF
======
Purpose
~~~~~~~
Loads the default palette to a colour table
Description
~~~~~~~~~~~
This application loads the standard palette of colours to fill the
portion of the current image display's colour table which is reserved
for the palette. The palette comprises 16 colours and is intended to
provide coloured annotations, borders, axes, graphs etc. that are
unaffected by changes to the lookup table used for images.
Pen 0 (the background colour) and pen 1 (the foreground colour) are
set to the default values for the specified graphics device. Thus they
may be white on black for an X window, but black on white for a
printer. The other colours in the standard palette are:
+ 2: Red
+ 3: Green
+ 4: Blue
+ 5: Yellow
+ 6: Magenta
+ 7: Cyan
+ 8 to 15: Black
Usage
~~~~~
::

    paldef [device]
ADAM parameters
~~~~~~~~~~~~~~~
DEVICE = DEVICE (Read)
``````````````````````
Name of the image display to be used. [Current image-display device]
Examples
~~~~~~~~
paldef
This loads the standard palette into the reserved portion of the
colour table of the current image display.
paldef xwindows
This loads the standard palette into the reserved portion of the
colour table of the xwindows device.
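A minimal sketch of driving PALDEF from Python via this package's wrapper is
shown below; it assumes the usual starlink-pywrapper layout in which KAPPA
commands are exposed as functions of the starlink.kappa module, so check your
installation before relying on it.

::

    from starlink import kappa

    # Load the standard palette on the current image-display device
    kappa.paldef()

    # Load it on a named device (the device name here is illustrative)
    kappa.paldef(device='xwindows')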
Notes
~~~~~
+ The effects of this command will only be immediately apparent when
run on X windows which have 256 colours (or other similar pseudocolour
devices). On other devices (for instance, X windows with more than 256
colours) the effects will only become apparent when subsequent
graphics applications are run.
Related Applications
~~~~~~~~~~~~~~~~~~~~
KAPPA: PALENTRY, PALREAD, PALSAVE.
Copyright
~~~~~~~~~
Copyright (C) 1991, 1994 Science & Engineering Research Council.
Copyright (C) 1998-1999, 2004 Central Laboratory of the Research
Councils. All Rights Reserved.
Licence
~~~~~~~
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA
/math_econ_code-0.1.2.tar.gz/math_econ_code-0.1.2/meclp.py
import pandas as pd
import numpy as np
import scipy.sparse as spr
#import gurobipy as grb
import sympy
from sympy.solvers import solve
from sympy import *
import matplotlib.pyplot as plt
from tabulate import tabulate
#############################
# LP1: Intro to linear programming #
#############################
def load_stigler_data(verbose=False):
import pandas as pd
thepath = 'https://raw.githubusercontent.com/math-econ-code/mec_optim_2021-01/master/data_mec_optim/lp_stigler-diet/'
filename = 'StiglerData1939.txt'
thedata = pd.read_csv(thepath + filename, sep='\t')
thedata = thedata.dropna(how = 'all')
commodities = (thedata['Commodity'].values)[:-1]
allowance = thedata.iloc[-1, 4:].fillna(0).transpose()
if verbose:
print('Daily nutrient content:')
print(tabulate(thedata.head()))
print('\nDaily nutrient requirement:')
print(allowance)
return({'N_i_j':thedata.iloc[:-1, 4:].fillna(0).transpose().values,
'd_i':np.array(allowance),
'c_j':np.ones(len(commodities)),
'nbi':len(allowance),
'nbj':len(commodities),
'names_i': list(thedata.columns)[4:],
'names_j':commodities})
def print_optimal_diet(q_j, commodities):  # commodities: list of food names, e.g. names_j from load_stigler_data
print('***Optimal solution***')
total,thelist = 0.0, []
for j,commodity in enumerate(commodities):
if q_j[j] > 0:
total += q_j[j] * 365
thelist.append([commodity,q_j[j]])
thelist.append(['Total cost (optimal):', total])
print(tabulate(thelist))
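# Usage sketch (not part of the original module): solve the Stigler diet LP
# with scipy.optimize.linprog.  Assumes network access (load_stigler_data
# downloads the data) and that print_optimal_diet receives the commodity names.
def example_solve_stigler_diet():
    from scipy.optimize import linprog  # local import so scipy stays optional
    d = load_stigler_data()
    # minimize c_j @ q_j   subject to   N_i_j @ q_j >= d_i,  q_j >= 0
    res = linprog(c=d['c_j'],
                  A_ub=-np.asarray(d['N_i_j'], dtype=float),
                  b_ub=-np.asarray(d['d_i'], dtype=float),
                  method='highs')
    print_optimal_diet(res.x, d['names_j'])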
#########################
# LP2: The simplex algorithm #
#########################
def round_expr(expr, num_digits):
return expr.xreplace({n : round(n, num_digits) for n in expr.atoms(Number)})
class Tableau():
def __init__(self, names_basic, names_nonbasic, A_i_j, b_i, c_j): # z = d - A @ x
self.nonbasic = list(symbols(names_nonbasic))
self.base = { Symbol('obj') : c_j @ self.nonbasic }
self.base.update( { list(symbols(names_basic))[i]: b_i[i] - (A_i_j @ self.nonbasic)[i] for i in range(len(b_i))} )
def variables(self):
return( list(self.base.keys())[1:] + self.nonbasic)
def evaluate(self,thevar):
if thevar in set(self.nonbasic):
return 0.0
else:
return float(self.base[thevar].evalf(subs = {variable:0.0 for variable in self.nonbasic} ))
def display(self):
for i in self.base:
print(i,' = ' ,round_expr(self.base[i],2))
def print_solution(self,title=None):
if not (title is None):
print(title)
for var in self.base:
print(str(var)+'='+str(self.base[var].subs([ (variable,0) for variable in self.nonbasic])))
def determine_entering(self):
self.nonbasic.sort(key=str) # Bland's rule
for entering_var in self.nonbasic:
if diff(self.base[Symbol('obj')],entering_var)>0 :
return entering_var
return None # If no entering variable found, None returned
def determine_departing(self,entering_var):
runmin = float('inf')
departing_var = None
for var in self.base.keys() - {Symbol('obj')}:
the_expr_list = solve(self.base[var] - var,entering_var)
if the_expr_list: # if one can invert the previous expression
the_expr = the_expr_list[0] # express entering variable as a function of the other ones:
val_entering_var = the_expr.subs([ (variable,0) for variable in [var]+self.nonbasic])
if (val_entering_var >= 0) & (val_entering_var < runmin) :
runmin,departing_var = val_entering_var, var
return departing_var # if no variable is found, None returned
def pivot(self, entering_var, departing_var, verbose = 0):
expr_entering = solve(self.base[departing_var] - departing_var, entering_var)[0]
for var in self.base:
self.base[var] = self.base[var].subs([(entering_var, expr_entering)])
self.base[entering_var] = expr_entering
del self.base[departing_var]
self.nonbasic.remove(entering_var)
self.nonbasic.append(departing_var)
if verbose > 0:
print('Entering = ' + str( entering_var)+'; departing = '+ str( departing_var))
if verbose > 1:
print(str( entering_var)+' = '+str(round_expr(expr_entering,2)))
return expr_entering
def simplex_loop(dual_tableau,primal_tableau,primals):
entering_var = dual_tableau.determine_entering()
if entering_var is None:
print('Optimal solution found.\n=======================')
primal_tableau.print_solution('Basic primal variables:')
dual_tableau.print_solution('Basic dual variables:')
else:
departing_var = dual_tableau.determine_departing(entering_var)
if departing_var is None:
print('Unbounded solution.')
else:
expr_entering_var = dual_tableau.pivot(entering_var,departing_var, verbose= 1)
_ = primal_tableau.pivot(primals[departing_var],primals[entering_var])
return False # not finished
return True # finished
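# Usage sketch (not part of the original module): one pivot on a toy problem,
#   max 2*x_1 + 3*x_2   s.t.   x_1 + x_2 <= 4,  x_1 + 3*x_2 <= 6,  x >= 0,
# with slack variables s_1, s_2 forming the initial basis.
def example_tableau_pivot():
    A_i_j = np.array([[1.0, 1.0], [1.0, 3.0]])
    b_i = np.array([4.0, 6.0])
    c_j = np.array([2.0, 3.0])
    tab = Tableau(['s_1', 's_2'], ['x_1', 'x_2'], A_i_j, b_i, c_j)
    tab.display()
    entering = tab.determine_entering()            # x_1 under Bland's rule
    departing = tab.determine_departing(entering)  # s_1 (the binding constraint)
    tab.pivot(entering, departing, verbose=2)
    tab.print_solution('Basis after one pivot:')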
def Tableau_plot_path_so_far(self, the_path):  # plotting helper for a Tableau-like object that stores A_i_j, b_i and c_j
if self.A_i_j.shape[1] != 2:
print('Can\'t plot the solution in 2D, the number primal variables differs from two.')
return()
x1, x2 = np.meshgrid(np.linspace(-0.2, 1.4, 400), np.linspace(-0.2, 1.4, 400))
feasible_region = (x1 >= 0) & (x2 >= 0)
for i in range(len(self.b_i)):
feasible_region = feasible_region & ( self.A_i_j[i,0]*x1 + self.A_i_j[i,1]*x2 <= self.b_i[i] )
fig, ax = plt.subplots(figsize=(5, 5))
plt.contourf(x1, x2, np.where(feasible_region, self.c_j[0]*x1+self.c_j[1]*x2, np.nan), 50, alpha = 0.5, cmap='gray_r', levels=30)
plt.text(0.4, 0.4, 'Feasible solutions', color = 'white', ha='center', va='center')
for i in range(len(self.b_i)):
ax.plot(x1[0, :], self.b_i[i] / self.A_i_j[i,1] - (self.A_i_j[i,0]/self.A_i_j[i,1])*x1[0, :], label='s'+str(i)+ ' = 0')
ax.plot([a for (a,_) in the_path], [b for (_,b) in the_path], 'r--', label='Algorithm path')
ax.scatter([a for (a,_) in the_path], [b for (_,b) in the_path], color='red')
for i, bfs in enumerate(the_path):
plt.text(bfs[0], bfs[1], 'BFS'+str(i), color = 'red', ha='left', va='bottom')
ax.set_xlim(-0.2, 1.4), ax.set_ylim(-0.2, 1.4)
ax.set_xlabel('x1'), ax.set_ylabel('x2')
ax.spines['left'].set_position('zero')
ax.spines['bottom'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.legend()
plt.show()
#########################
# LP3: Interior Point Methods #
#########################
class InteriorPoint():
def __init__(self,A,b,c,current_point=None):
self.A = A
self.b = b
self.c = c
self.current_point = current_point
self.α = 1 - (1/8)/(1/5 + np.sqrt(len(c))) # shrinkage coeff α given by Freund & Vera
def update(self, verbose=0):
x, y, s, θ = self.current_point
Δy = np.linalg.solve(self.A @ np.diag(1/s) @ np.diag(x) @ self.A.T, θ * self.A @ (1/s) - self.b)
Δs = self.A.T @ Δy
Δx = - x - np.diag(1/s) @ np.diag(x) @ Δs + θ * (1/s)
self.current_point = [x+Δx, y+Δy, s+Δs, self.α*θ]
return self.current_point
def loop(self, tol=1e-6, max_iter=100, verbose=0):
current_point = self.current_point
new_point = self.update(verbose)  # take one interior-point step
if all(abs(np.concatenate(new_point[:-1]) - np.concatenate(current_point[:-1])) < tol):
print('Optimal solution found.\n=======================')
for i in range(len(new_point[0])): print("x_" + str(i+1), "=", new_point[0][i])
else:
if verbose > 1:
for i in range(len(new_point[0])): print("x_" + str(i+1), "=", new_point[0][i])
return False # not finished
return True # finished
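# Usage sketch (not part of the original module): drive the interior-point
# iterations on a tiny equality-form LP, min x_1 + 2*x_2 s.t. x_1 + x_2 = 1,
# x >= 0.  The starting point is strictly feasible for both primal and dual
# (s = c - A.T @ y > 0); how quickly this converges depends on that choice.
def example_interior_point():
    A = np.array([[1.0, 1.0]])
    b = np.array([1.0])
    c = np.array([1.0, 2.0])
    x0 = np.array([0.5, 0.5])          # A @ x0 == b and x0 > 0
    y0 = np.array([0.0])
    s0 = c - A.T @ y0                  # [1.0, 2.0] > 0
    ip = InteriorPoint(A, b, c, current_point=[x0, y0, s0, 1.0])
    for _ in range(200):               # external cap on the number of steps
        if ip.loop(tol=1e-8):
            break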
/mfn_sdk-0.8.9.tar.gz/mfn_sdk-0.8.9/mfn_sdk/workflow.py
import requests
import base64
import json
import random
import sys
import time
import logging
from .deprecated import deprecated
#logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
class Execution(object):
""" Execution represents the execution of a workflow that can be referenced by its execution ID
an execution object is returned from asynchronous workflow invocations
"""
def __init__(self, client, url, exec_id):
self.client=client
self.url=url
self.execution_id = exec_id
def get(self, timeout=60):
try:
r = self.client._s.post(self.url,
params = {"executionId": self.execution_id},
timeout=timeout)
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout) as e:
raise Exception("Retrieving result of workflow from URL '"+self.url+"' failed due to "+type(e).__name__).with_traceback(sys.exc_info()[2])
r.raise_for_status()
return r.json()
class Workflow(object):
""" Workflow represents a registered workflow, every method invocation or property assignment results in one or more calls to management functions
"""
def __init__(self,client,wf):
self.client=client
self.id=wf["id"]
self._name=wf["name"]
self._modified=wf["modified"]
self._status=wf.get("status",None)
self._endpoints=wf.get("endpoints",None)
self._deployment_error = ""
self._json=None
def __str__(self):
if self._status == "deployed":
return f"{self.id} ({self._name}, status: {self._status}, endpoints: {self._endpoints})"
else:
return f"{self.id} ({self._name}, status: {self._status})"
@property
def name(self):
# TODO: workflow name could have been updated, decide if we should fetch workflow status
return self._name
@name.setter
def name(self,name):
res = self.client.action('modifyWorkflow',{'workflow':{'id':self.id,'name':name,'runtime':self._runtime}})
self._name = name
@property
def modified(self):
# TODO: workflow modification date could have been updated, decide if we should fetch workflow status
return self._modified
@property
def status(self):
data = self.client.action('getWorkflows',{'workflow':{'id':self.id}})
self._status = data['workflow']['status']
if self._status == "deployed":
self._endpoints = data['workflow']['endpoints']
else:
self._endpoints = None
if self._status == 'failed' and "deployment_error" in data['workflow']:
self._deployment_error = data['workflow']['deployment_error']
return self._status
def get_deployment_error(self):
return self._deployment_error
@property
def endpoint(self):
if self.status == 'deployed':
return random.choice(self._endpoints)
else:
return None
@property
def endpoints(self):
if self.status == 'deployed':
return self._endpoints
else:
return None
@property
def json(self):
if not self._json:
data = self.client.action('getWorkflowJSON',{'workflow':{'id':self.id}})
self._json = base64.b64decode(data['workflow']['json']).decode().replace("\r","\n")
return self._json
@json.setter
def json(self,json):
if json != self.json:
self._json = json
self.client.action('uploadWorkflowJSON',{'workflow':{'id':self.id,'json':base64.b64encode(self._json.encode()).decode()}})
def deploy(self, timeout=None):
""" deploy a workflow and optionally wait in linearly increasing multiples of 1000ms
:timeout: By default returns after calling deploy on the workflow without waiting for it to be actually deployed.
If timeout is set to a numeric <= 0, it waits indefinitely in intervals of 1000ms, 2000ms, 3000ms, ...
If timeout is set to a numeric > 0, it waits for the workflow to be deployed in increasing multiples of 1000ms, but no longer than the timeout. When the timeout expires and the workflow is not deployed, the function raises an Exception
"""
s = self.status
if s == 'deployed':
log.debug("deploy: wf %s already deployed",self.name)
return
elif s == 'deploying':
log.debug("deploy: wf %s already being deployed",self.name)
elif s == 'failed':
log.debug("deploy: wf %s cannot be deployed", self.name)
log.debug("deployment error: %s", self._deployment_error)
else:
self.client.action('deployWorkflow',{'workflow':{'id':self.id}})
# if timeout is None, do not wait but return immediately even if it's not yet deployed
if timeout is None:
return
sleep = 1
if timeout > 0:
# if timeout > 0, wait in increasing intervals but raise Exception if it's not deployed until the timeout expires
t = time.time()
end = t + timeout
while t < end:
s = self.status
if s == 'deployed' or s == 'failed':
print()
return
print("Waiting for deployment to come online; passed so far: " + str(round(t-end+timeout, 2)) + " seconds", end=" \r")
sys.stdout.flush()
t = time.time()
if sleep < (end-t):
time.sleep(sleep)
sleep += 1
else:
time.sleep(max(0,end-t))
raise Exception("Deployment attempt timed out (%d)"%timeout)
else:
# if timeout <=0, wait in increasing intervals until deployed, even if this means forever
while True:
s = self.status
if s == 'deployed' or s == 'failed':
return
time.sleep(sleep)
sleep += 1
def undeploy(self, timeout=None):
""" undeploy a workflow and optionally wait in linearly increasing multiples of 100ms
:timeout: By default returns after calling undeploy on the workflow without waiting for it to be actually undeployed.
If timeout is set to a numeric <= 0, it waits indefinitely in intervals of 100ms, 200ms, 300ms, ...
If timeout is set to a numeric > 0, it waits for the workflow to be undeployed in increasing multiples of 100ms, but no longer than the timeout. When the timeout expires and the workflow is not undeployed, the function raises an Exception
"""
if self.status == 'undeployed':
log.debug("undeploy: wf %s not deployed",self.name)
return
if self.status == 'undeploying':
log.debug("undeploy: wf %s is already being undeployed",self.name)
else:
self.client.action('undeployWorkflow',{'workflow':{'id':self.id}})
# if timeout is None, do not wait but return immediately even if it's not yet deployed
if timeout is None:
return
sleep = 1
if timeout > 0:
# if timeout > 0, wait in increasing intervals but raise Exception if it's not undeployed until the timeout expires
end = timeout + time.time()
t = 0
while t < end:
if self.status == 'undeployed':
return
t = time.time()
if sleep < (end-t):
time.sleep(sleep)
sleep += 1
else:
time.sleep(end-t)
raise Exception("Deployment attempt timed out (%d)"%timeout)
else:
# if timeout <=0, wait in increasing intervals until undeployed, even if this means forever
while True:
if self.status == 'undeployed':
return
time.sleep(sleep)
sleep += 1
def execute_async(self,data,timeout=30):
""" execute a workflow asynchronously and returns an Execution object
The function delivers an event to the frontend and returns an Execution object. Note that the timeout here applies to the delivery of the event, another timeout can be used when fetching the result with the Execution.get(timeout) method
see also: Execution, execute(), and Execution.get()
:param data: the event dictionary passed to the workflow
:type data: dict()
:param timeout: time in seconds to wait for the event delivery to complete, otherwise throws ReadTimeout
:type timeout: int
:return: an Execution object to fetch the result
:rtype: Execution
:raises requests.exceptions.HTTPError: when the HTTP request to deliver the event fails
:raises requests.exceptions.ConnectionError: when the platform can not be reached
:raises requests.exceptions.ReadTimeout: when reading the HTTP result of delivering the event times out
:raises ValueError: in case the response is not JSON
"""
if self._status != "deployed":
raise Exception("Workflow not deployed: " + self.name)
# we are already deployed and have the endpoints stored in self._endpoints
url = random.choice(self._endpoints)
try:
r = self.client._s.post(url,
params={'async':'True'},
json=data,
allow_redirects=False,
timeout=timeout)
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout) as e:
raise Exception("Asynchronous execution of workflow at URL '"+url+"' failed due to "+type(e).__name__)
r.raise_for_status()
exec_id = r.text
return Execution(self.client, url, exec_id)
def execute(self,data,timeout=60, check_duration=False):
""" execute a workflow synchronously
The function sends an event to the frontend and waits for the result in the HTTP response
:param data: the event dictionary passed to the workflow
:type data: dict()
:param timeout: time in seconds to wait for the workflow to complete, otherwise throws ReadTimeout
:type timeout: int
:return: the result of the workflow execution
:rtype: dict()
:raises requests.exceptions.HTTPError: when the HTTP request to execute the workflow fails (e.g. 500 ServerError)
:raises requests.exceptions.ConnectionError: when the platform can not be reached
:raises requests.exceptions.ReadTimeout: when reading the HTTP response with the workflow result times out
:raises ValueError: in case the response is not JSON
"""
#if self.status != 'deployed':
# self.deploy(-1)
if self._status != "deployed":
raise Exception("Workflow not deployed: " + self.name)
# we are already deployed and have the endpoints stored in self._endpoints
url = random.choice(self._endpoints)
try:
#postdata = {}
#postdata["value"] = json.dumps(data)
#postdata = json.dumps(postdata)
if check_duration:
t_start = time.time()
r = self.client._s.post(url,
params={},
json=data,
#headers={'Content-Type':'application/json'},
#data=postdata,
#headers={'Content-Type':'application/x-www-form-urlencoded'},
#data=postdata,
timeout=timeout)
if check_duration:
t_total = (time.time() - t_start) * 1000.0
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout) as e:
raise Exception("Execution of workflow '"+self.name+"' at URL '"+url+"' failed due to "+type(e).__name__)
r.raise_for_status()
if check_duration:
return r.json(), t_total
return r.json()
def logs(self, clear=False, ts_earliest=0.0, num_lines=500):
""" fetch logs of this workflow
:clear: default=False; if True, the function calls clear_logs() before returning
:returns: a dict {'exceptions':<str>,'progress':<str>,'log':<str>}
"""
#print("earliest: " + str(ts_earliest))
data = self.client.action('retrieveAllWorkflowLogs',{'workflow':{'id':self.id, 'ts_earliest': ts_earliest, 'num_lines': num_lines}})
res = {'exceptions':base64.b64decode(data['workflow']['exceptions']).decode(),
'progress':base64.b64decode(data['workflow']['progress']).decode(),
'log':base64.b64decode(data['workflow']['log']).decode(),
'timestamp': data['workflow']['timestamp']}
if clear:
self.clear_logs()
return res
def clear_logs(self):
""" clear logs of this workflow """
try:
self.client.action('clearAllWorkflowLogs',{'workflow':{'id':self.id}})
except requests.exceptions.HTTPError as e:
e.strerror += "while trying to clearAllWorkflowLogs for wf '"+self.name+"'/"+self.id
raise e
def delete_logs(self):
""" alias for clear_logs() """
self.clear_logs()
def get_functions(self):
fnames = []
wfjson = json.loads(self.json)
if 'States' in wfjson:
for sname,state in list(wfjson['States'].items()):
if 'Resource' in state:
fnames.append(state['Resource'])
elif 'grains' in wfjson:
for gdict in wfjson['grains']:
fnames.append(gdict['name'])
functions = []
for f in self.client.functions:
if f._name in fnames:
functions.append(f)
fnames.remove(f.name)
if len(fnames) > 0:
log.warn("Could not find all functions of workflow %s, missing %s"%(self.name,str(fnames)))
return functions
def bind_trigger(self,trigger):
self.client.bind_trigger(trigger._name, self._name)
def unbind_trigger(self,trigger):
self.client.unbind_trigger(trigger._name, self._name)
def bind_bucket(self,bucket):
self.client.bind_bucket(bucket._name, self._name)
def unbind_bucket(self, bucket):
    self.client.unbind_bucket(bucket._name, self._name)
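# Usage sketch (not part of the original module).  How a Workflow object is
# typically obtained and driven; the client class name (MfnClient) and its
# connection arguments are assumptions here, so consult the SDK documentation
# for your deployment before copying this.
#
#   from mfn_sdk import MfnClient
#   client = MfnClient(url="https://knix.example.com", user="user@example.com",
#                      password="...", name="workspace")   # hypothetical arguments
#   wf = next(w for w in client.workflows if w.name == "hello_workflow")
#   wf.deploy(timeout=60)                        # block up to 60s for deployment
#   print(wf.execute({"name": "world"}))         # synchronous invocation
#   ex = wf.execute_async({"name": "async world"})
#   print(ex.get(timeout=120))                   # fetch the asynchronous result
#   wf.undeploy(timeout=60)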
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/users/item/device_enrollment_configurations/item/assignments/assignments_request_builder.py
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from ......models import enrollment_configuration_assignment, enrollment_configuration_assignment_collection_response
from ......models.o_data_errors import o_data_error
from .count import count_request_builder
from .item import enrollment_configuration_assignment_item_request_builder
class AssignmentsRequestBuilder():
"""
Provides operations to manage the assignments property of the microsoft.graph.deviceEnrollmentConfiguration entity.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new AssignmentsRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/users/{user%2Did}/deviceEnrollmentConfigurations/{deviceEnrollmentConfiguration%2Did}/assignments{?%24top,%24skip,%24search,%24filter,%24count,%24orderby,%24select,%24expand}"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
def by_enrollment_configuration_assignment_id(self,enrollment_configuration_assignment_id: str) -> enrollment_configuration_assignment_item_request_builder.EnrollmentConfigurationAssignmentItemRequestBuilder:
"""
Provides operations to manage the assignments property of the microsoft.graph.deviceEnrollmentConfiguration entity.
Args:
enrollment_configuration_assignment_id: Unique identifier of the item
Returns: enrollment_configuration_assignment_item_request_builder.EnrollmentConfigurationAssignmentItemRequestBuilder
"""
if enrollment_configuration_assignment_id is None:
raise Exception("enrollment_configuration_assignment_id cannot be undefined")
from .item import enrollment_configuration_assignment_item_request_builder
url_tpl_params = get_path_parameters(self.path_parameters)
url_tpl_params["enrollmentConfigurationAssignment%2Did"] = enrollment_configuration_assignment_id
return enrollment_configuration_assignment_item_request_builder.EnrollmentConfigurationAssignmentItemRequestBuilder(self.request_adapter, url_tpl_params)
async def get(self,request_configuration: Optional[AssignmentsRequestBuilderGetRequestConfiguration] = None) -> Optional[enrollment_configuration_assignment_collection_response.EnrollmentConfigurationAssignmentCollectionResponse]:
"""
The list of group assignments for the device configuration profile
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: Optional[enrollment_configuration_assignment_collection_response.EnrollmentConfigurationAssignmentCollectionResponse]
"""
request_info = self.to_get_request_information(
request_configuration
)
from ......models.o_data_errors import o_data_error
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
from ......models import enrollment_configuration_assignment_collection_response
return await self.request_adapter.send_async(request_info, enrollment_configuration_assignment_collection_response.EnrollmentConfigurationAssignmentCollectionResponse, error_mapping)
async def post(self,body: Optional[enrollment_configuration_assignment.EnrollmentConfigurationAssignment] = None, request_configuration: Optional[AssignmentsRequestBuilderPostRequestConfiguration] = None) -> Optional[enrollment_configuration_assignment.EnrollmentConfigurationAssignment]:
"""
Create new navigation property to assignments for users
Args:
body: The request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: Optional[enrollment_configuration_assignment.EnrollmentConfigurationAssignment]
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = self.to_post_request_information(
body, request_configuration
)
from ......models.o_data_errors import o_data_error
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
from ......models import enrollment_configuration_assignment
return await self.request_adapter.send_async(request_info, enrollment_configuration_assignment.EnrollmentConfigurationAssignment, error_mapping)
def to_get_request_information(self,request_configuration: Optional[AssignmentsRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
"""
The list of group assignments for the device configuration profile
Args:
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.GET
request_info.headers["Accept"] = ["application/json"]
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
request_info.add_request_options(request_configuration.options)
return request_info
def to_post_request_information(self,body: Optional[enrollment_configuration_assignment.EnrollmentConfigurationAssignment] = None, request_configuration: Optional[AssignmentsRequestBuilderPostRequestConfiguration] = None) -> RequestInformation:
"""
Create new navigation property to assignments for users
Args:
body: The request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.POST
request_info.headers["Accept"] = ["application/json"]
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.add_request_options(request_configuration.options)
request_info.set_content_from_parsable(self.request_adapter, "application/json", body)
return request_info
@property
def count(self) -> count_request_builder.CountRequestBuilder:
"""
Provides operations to count the resources in the collection.
"""
from .count import count_request_builder
return count_request_builder.CountRequestBuilder(self.request_adapter, self.path_parameters)
@dataclass
class AssignmentsRequestBuilderGetQueryParameters():
"""
The list of group assignments for the device configuration profile
"""
def get_query_parameter(self,original_name: Optional[str] = None) -> str:
"""
Maps the query parameters names to their encoded names for the URI template parsing.
Args:
originalName: The original query parameter name in the class.
Returns: str
"""
if original_name is None:
raise Exception("original_name cannot be undefined")
if original_name == "count":
return "%24count"
if original_name == "expand":
return "%24expand"
if original_name == "filter":
return "%24filter"
if original_name == "orderby":
return "%24orderby"
if original_name == "search":
return "%24search"
if original_name == "select":
return "%24select"
if original_name == "skip":
return "%24skip"
if original_name == "top":
return "%24top"
return original_name
# Include count of items
count: Optional[bool] = None
# Expand related entities
expand: Optional[List[str]] = None
# Filter items by property values
filter: Optional[str] = None
# Order items by property values
orderby: Optional[List[str]] = None
# Search items by search phrases
search: Optional[str] = None
# Select properties to be returned
select: Optional[List[str]] = None
# Skip the first n items
skip: Optional[int] = None
# Show only the first n items
top: Optional[int] = None
@dataclass
class AssignmentsRequestBuilderGetRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, Union[str, List[str]]]] = None
# Request options
options: Optional[List[RequestOption]] = None
# Request query parameters
query_parameters: Optional[AssignmentsRequestBuilder.AssignmentsRequestBuilderGetQueryParameters] = None
@dataclass
class AssignmentsRequestBuilderPostRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, Union[str, List[str]]]] = None
# Request options
options: Optional[List[RequestOption]] = None
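# Usage sketch (not part of the generated file): list assignments with query
# parameters using the builder above.  `adapter` is assumed to be an
# already-authenticated kiota RequestAdapter, and the IDs are placeholders.
#
#   import asyncio
#
#   async def list_assignments(adapter):
#       query_params = AssignmentsRequestBuilder.AssignmentsRequestBuilderGetQueryParameters(
#           top=5, select=["id", "target"])
#       config = AssignmentsRequestBuilder.AssignmentsRequestBuilderGetRequestConfiguration(
#           query_parameters=query_params)
#       builder = AssignmentsRequestBuilder(adapter, {
#           "user%2Did": "<user-id>",
#           "deviceEnrollmentConfiguration%2Did": "<enrollment-config-id>"})
#       response = await builder.get(request_configuration=config)
#       return response.value if response else None
#
#   # asyncio.run(list_assignments(adapter))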
/edpop_explorer-0.4.2-py3-none-any.whl/edpop_explorer/sparqlreader.py
from typing import Dict, List, Optional
from dataclasses import dataclass, field as dataclass_field
from SPARQLWrapper import SPARQLWrapper, JSON, SPARQLExceptions
from edpop_explorer.apireader import APIReader, APIRecord, APIException
PREFIXES = {
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
'schema': 'http://schema.org/',
'owl': 'http://www.w3.org/2002/07/owl#',
}
PREFIXES_REVERSE_REPLACEMENT_TABLE = {
PREFIXES[key]: (key + ':') for key in PREFIXES
}
PREFIX_DEFINITIONS = '\n'.join([
f'prefix {key}: <{PREFIXES[key]}>' for key in PREFIXES
])
prepare_listing_query = (PREFIX_DEFINITIONS + """
select ?s ?name where
{{
?s ?p ?o .
?s {name_predicate} ?name .
{filter}
FILTER (regex(?o, "{query}","i"))
}}
order by ?s
""").format
prepare_lookup_query = """
prefix schema: <http://schema.org/>
select ?p ?o
{{
<{identifier}> ?p ?o
}}
""".format
def replace_fqu_with_prefixed_uris(inputstring: str) -> str:
'''Replace fully qualified URIs with prefixed URIs if they occur in
the prefix table in the prefixes attribute'''
for key in PREFIXES_REVERSE_REPLACEMENT_TABLE:
inputstring = inputstring.replace(
key, PREFIXES_REVERSE_REPLACEMENT_TABLE[key], 1
)
return inputstring
@dataclass
class SparqlRecord(APIRecord):
name: str = None
identifier: str = None
sparql_endpoint: str = None
fetched: bool = False
fields: dict = dataclass_field(default_factory=dict)
def fetch(self) -> None:
if self.fetched:
return
wrapper = SPARQLWrapper(self.sparql_endpoint)
wrapper.setReturnFormat(JSON)
wrapper.setQuery(prepare_lookup_query(identifier=self.identifier))
try:
response = wrapper.queryAndConvert()
except SPARQLExceptions.QueryBadFormed as err:
raise APIException(
'Malformed SPARQL query: {}'.format(err)
)
results = response['results']['bindings']
for result in results:
self.fields[result['p']['value']] = result['o']['value']
self.fetched = True
def get_title(self) -> str:
return self.name
def show_record(self) -> str:
self.fetch()
field_strings = []
if self.link:
field_strings.append('URL: ' + self.link)
for field in self.fields:
fieldstring = replace_fqu_with_prefixed_uris(field)
field_strings.append(
'{}: {}'.format(fieldstring, self.fields[field])
)
return '\n'.join(field_strings)
def __repr__(self):
return self.get_title()
class SparqlReader(APIReader):
url: str = None
filter: str = None
wrapper: SPARQLWrapper
records: List[SparqlRecord]
name_predicate: str = None
def __init__(self):
    self.wrapper = SPARQLWrapper(self.url)
    self.wrapper.setReturnFormat(JSON)
    self.prepared_query = None  # set by prepare_query(); fetch() requires it
def prepare_query(self, query: str):
self.prepared_query = prepare_listing_query(
name_predicate=self.name_predicate,
filter=self.filter,
query=query
)
def fetch(self):
if not self.prepared_query:
raise APIException('First call prepare_query method')
self.wrapper.setQuery(self.prepared_query)
try:
response = self.wrapper.queryAndConvert()
except SPARQLExceptions.QueryBadFormed as err:
raise APIException(
'Malformed SPARQL query: {}'.format(err)
)
results = response['results']['bindings']
self.records = []
self.number_of_results = len(results)
for result in results:
record = SparqlRecord(
identifier=result['s']['value'],
sparql_endpoint=self.url,
link=result['s']['value'],
name=result['name']['value'],
)
self.records.append(record)
self.number_fetched = self.number_of_results
def fetch_next(self):
pass
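# Usage sketch (not part of the original module): a concrete reader for a
# hypothetical endpoint.  The URL, name predicate and extra filter pattern are
# illustrative only; large public endpoints may need a more restrictive filter
# for the regex-based listing query to finish within their time limits.
class ExampleSparqlReader(SparqlReader):
    url = 'https://example.org/sparql'        # replace with a real endpoint
    name_predicate = 'rdfs:label'             # predicate giving a display name
    filter = '?s rdf:type schema:Book .'      # optional extra triple pattern

def example_sparql_search(text):
    reader = ExampleSparqlReader()
    reader.prepare_query(text)
    reader.fetch()
    for record in reader.records:
        print(record.get_title())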
/intel_tensorflow-2.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/tensorflow/python/ops/parallel_for/pfor.py
"""Compiled parallel-for loop."""
# pylint: disable=missing-docstring,g-direct-tensorflow-import
import collections
from functools import partial
import string
import sys
import traceback
import numpy as np
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.core.framework import full_type_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import array_ops_stack
from tensorflow.python.ops import cond as tf_cond
from tensorflow.python.ops import control_flow_assert
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_switch_case
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import gen_list_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_optional_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import handle_data_util
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import while_loop
from tensorflow.python.platform import flags
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
# TODO(agarwal): remove flag.
flags.DEFINE_bool(
"op_conversion_fallback_to_while_loop", True,
"DEPRECATED: Flag is ignored.")
def _variant_handle_data(t):
"""Fetches handle data for a variant tensor `t`, or None if unavailable."""
handle_data = resource_variable_ops.get_eager_safe_handle_data(t)
if not handle_data.is_set:
return None
return handle_data.shape_and_type
def _variant_type_id(t):
"""Returns the full_type_pb2 type of `t`, or None if it is not available."""
if t.dtype != dtypes.variant:
return None
shapes_and_types = _variant_handle_data(t)
if shapes_and_types is None or not shapes_and_types:
# TODO(b/169968286): Identify all variant tensors (e.g. maps) and we can
# make this an error instead of assuming TensorLists have handle data.
return None # Presumed not a TensorList/Optional
return shapes_and_types[0].type.type_id
_INTERNAL_STACKING_TYPE_IDS = (
full_type_pb2.TFT_ARRAY,
full_type_pb2.TFT_OPTIONAL)
def _is_variant_with_internal_stacking(t):
"""Identifies variant tensors which pfor always maintains as scalars.
For these, the pfor tensor is recorded as "stacked" if the content of the
variant tensor (e.g. the elements of a TensorList) are all stacked.
Args:
t: A tensor to identify.
Returns:
True if `t` is a TensorList/Optional, False if not, None if unknown.
"""
type_id = _variant_type_id(t)
return type_id in _INTERNAL_STACKING_TYPE_IDS
def _parse_variant_shapes_and_types(t):
"""Extracts shape and dtype information from a variant tensor `t`."""
shapes_and_types = _variant_handle_data(t)
if shapes_and_types is None or not shapes_and_types:
raise ValueError("Required handle data not set for {!r}".format(t))
if shapes_and_types[0].type.type_id == full_type_pb2.TFT_ARRAY:
return shapes_and_types
else:
if shapes_and_types[0].type.type_id == full_type_pb2.TFT_UNSET:
return shapes_and_types
else:
raise ValueError(
"Attempted to stack a variant-dtype tensor with no type set ({!r})"
.format(t))
def _stack(t, length):
"""stacks `t` `length` times."""
# Note that this stacking may currently be triggered, for example, when a
# loop invariant tensor with dtype variant is input to a while_loop which then
# produces a loop dependent output. Simply stacking the variants may not be
# suitable since operations on stacked handles may expect a vectorized version
# of the variant.
if t.dtype == dtypes.variant:
shapes_and_types = _parse_variant_shapes_and_types(t)
if shapes_and_types[0].type.type_id == full_type_pb2.TFT_ARRAY:
if len(shapes_and_types) != 1:
raise ValueError(
f"Expected handle data of length 1, got {shapes_and_types!r} of "
f"length {len(shapes_and_types)}.")
return wrap(
_stack_tensor_list(t, shapes_and_types[0].dtype, length),
True)
else:
raise ValueError(
"Attempted to stack an unhandled variant-dtype tensor of "
f"type {shapes_and_types[0].type!r} ({t!r}).")
ones = array_ops.ones_like(array_ops.shape(t))
ones = array_ops.reshape(ones, [-1])
length = array_ops.reshape(length, [-1])
multiples = array_ops.concat([length, ones], 0)
t = array_ops.tile(array_ops.expand_dims(t, 0), multiples)
return wrap(t, True)
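# For example (sketch): stacking a loop-invariant tensor of shape [2] for a
# pfor loop of length 3 tiles it into shape [3, 2]:
#
#   _stack(constant_op.constant([1.0, 2.0]), constant_op.constant([3]))
#   # -> wrapped tensor whose .t has shape [3, 2] and whose .is_stacked is True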
# The following stateful ops can be safely called once, and with the same
# signature as the unconverted version, if their inputs are loop invariant.
# TODO(agarwal): implement a strategy for converting Variable reads/writes. The
# plan is to map each read/write in the loop_fn to a corresponding merged
# read/write in the converted graph. Writes need to be mergeable (e.g.
# AssignAdd) to be used in `pfor`. Given a certain read/write order in the
# loop_fn, doing a one-to-one conversion will simulate executing such
# instructions in lock-step across all iterations.
passthrough_stateful_ops = set([
"VariableV2",
"VarHandleOp",
"VariableShape",
"ReadVariableOp",
"StackV2",
"TensorArrayWriteV3",
"TensorArrayReadV3",
"TensorArraySizeV3",
])
# Ops which we will treat like stateful for the purpose of vectorization.
# Typically this is used to force pfor converters to run for these ops.
force_stateful_ops = set([
# We vectorize this since we need to change the element shape set on the
# list.
"TensorListReserve",
])
def _is_stateful_pfor_op(op):
if isinstance(op, WhileOp):
return op.is_stateful
if op.type == "Const":
# Const didn't have an op_def.
return False
if op.type in passthrough_stateful_ops:
return False
if op.type in force_stateful_ops:
return True
assert hasattr(op, "op_def") and op.op_def is not None, op
return op.op_def.is_stateful
# pylint: disable=protected-access
class WhileOp:
"""Object for storing state for converting the outputs of a while_loop."""
def __init__(self, exit_node, pfor_ops, fallback_to_while_loop, pfor_config):
"""Initializer.
Args:
exit_node: A tensor output from the while_loop.
pfor_ops: list of ops inside the current pfor loop.
fallback_to_while_loop: If True, fallback to while loop when conversion of
an op is not supported
pfor_config: PForConfig object used while constructing loop body.
"""
self._fallback_to_while_loop = fallback_to_while_loop
self._pfor_config = pfor_config
self._pfor_ops = set(pfor_ops)
self._pfor_op_ids = set(x._id for x in pfor_ops)
assert isinstance(exit_node, ops.Tensor)
self._while_context = exit_node.op._get_control_flow_context()
assert isinstance(self._while_context, control_flow_ops.WhileContext)
self._context_name = self._while_context.name
self._condition = self._while_context.pivot.op.inputs[0]
# Parts of an external while_loop could be created inside a pfor loop.
# However for the purpose here, we declare such loops to be external. Also
# note that we check if the condition was created inside or outside to
# determine if the while_loop was first created inside or outside.
# TODO(agarwal): check that the Enter and Exit of this loop are unstacked.
self._is_inside_loop = self.op_is_inside_loop(self._condition.op)
if self._is_inside_loop:
for e in self._while_context.loop_exits:
assert self.op_is_inside_loop(e.op)
# Note the code below tries to reverse engineer an existing while_loop graph
# by assuming the following pattern of nodes.
#
# NextIteration <---- Body <--- Enter
#       |                          ^
#       V                      ___| Y
#   Enter -> Merge -> Switch___
#              ^          | N
#              |          V
#          LoopCond      Exit
# Note that elements in the list below correspond one-to-one with each
# other. i.e. these lists are the same size, and the i_th entry corresponds
# to different Operations/Tensors of a single cycle as illustrated above.
# List of Switch ops (ops.Operation) that feed into an Exit Node.
self._exit_switches = []
# List of inputs (ops.Tensor) to NextIteration.
self._body_outputs = []
# List of list of control inputs of the NextIteration nodes.
self._next_iter_control_inputs = []
# List of Merge ops (ops.Operation).
self._enter_merges = []
# List of output (ops.Tensor) of Exit nodes.
self._outputs = []
# List of Enter Tensors.
# There are two types of Enter nodes:
# - The Enter nodes that are used in the `loop_vars` argument to
# `while_loop` (see
# https://www.tensorflow.org/api_docs/python/tf/while_loop). We collect
# these Enter nodes immediately below by tracing backwards from the Exit
# nodes via Exit <- Switch <- Merge <- Enter. You can see this chain in the
# diagram above. This allows us to have a 1:1 correspondence between the
# self._outputs and the first elements in self._enters.
# - The Enter nodes that are used only by the body. They don't appear in the
# `loop_vars` and are not returned from the `while_loop`. In Python code,
# they are usually captured by the body lambda. We collect them below by
# iterating over all the ops in the graph. They are appended to the end of
# self._enters or self._direct_enters, and don't correspond to any outputs
# in self._outputs. Note that we keep the resource/variant Enter nodes in
# self._direct_enters and the constructed while_loop's body uses them
# directly as opposed to passing them as loop variables. This is done
# because the while_body cannot partition the resource/variant Tensors, so
# it has to leave them unchanged.
self._enters = []
self._direct_enters = []
for e in self._while_context.loop_exits:
self._outputs.append(e.op.outputs[0])
switch = e.op.inputs[0].op
assert switch.type == "Switch", switch
self._exit_switches.append(switch)
merge = switch.inputs[0].op
assert merge.type == "Merge", merge
self._enter_merges.append(merge)
enter = merge.inputs[0].op
assert enter.type == "Enter", enter
self._enters.append(enter.outputs[0])
next_iter = merge.inputs[1].op
assert next_iter.type == "NextIteration", next_iter
self._body_outputs.append(next_iter.inputs[0])
self._next_iter_control_inputs.append(next_iter.control_inputs)
# Collect all the Enter nodes that are not part of `loop_vars`, the second
# category described above.
# Also track whether the loop body has any stateful ops.
self._is_stateful = False
for op in ops.get_default_graph().get_operations():
# TODO(agarwal): make sure this works with nested case.
control_flow_context = op._get_control_flow_context()
if control_flow_context is None:
continue
if control_flow_context.name == self._context_name:
self._is_stateful |= _is_stateful_pfor_op(op)
if op.type == "Enter":
output = op.outputs[0]
if output not in self._enters:
if output.dtype in (dtypes.resource, dtypes.variant):
if output not in self._direct_enters:
self._direct_enters.append(output)
else:
self._enters.append(output)
def __str__(self):
"""String representation."""
return "while_loop(%s)" % self.name
@property
def inputs(self):
"""Input to all the Enter nodes."""
return [x.op.inputs[0] for x in self._enters + self._direct_enters]
@property
def control_inputs(self):
"""Control input to all the Enter nodes."""
control_inputs = []
for x in self._enters + self._direct_enters:
control_inputs.extend(x.op.control_inputs)
return control_inputs
@property
def outputs(self):
"""Outputs of all the Exit nodes."""
return self._outputs
@property
def name(self):
"""Context name for the while loop."""
return self._context_name
@property
def is_inside_loop(self):
"""Returns true if the while_loop was created inside the pfor."""
return self._is_inside_loop
def op_is_inside_loop(self, op):
"""True if op was created inside the pfor loop body."""
assert isinstance(op, ops.Operation)
# Note that we use self._pfor_op_ids for the check and not self._pfor_ops
# since it appears there tensorflow API could return different python
# objects representing the same Operation node.
return op._id in self._pfor_op_ids
@property
def is_stateful(self):
return self._is_stateful
@property
def pfor_converter(self):
"""Return a converter for the while loop."""
return self
def _init_pfor(self, parent_pfor, indices, cond_stacked, inputs,
inputs_stacked):
"""Create a PFor object for converting parts of the while_loop.
Args:
parent_pfor: PFor object being used for converting the while_loop.
indices: int32 Tensor of ids for the iterations that are still active
(i.e. did not exit the while_loop).
cond_stacked: True if the while_loop condition is stacked.
inputs: list of input Tensors corresponding 1-to-1 with self._enters. Note
that these Tensors are a subset of the loop variables for the generated
while_loop.
inputs_stacked: List of booleans corresponding 1-to-1 with `inputs`,
indicating if the value is stacked or not.
Returns:
A PFor instance. The instance is initialized by adding conversion mappings
of nodes that will be external to the conversion that the returned
instance will be used for. e.g. Enter nodes as well as Merge and Switch
outputs are mapped to converted values.
"""
num_outputs = len(self._outputs)
assert len(inputs) == len(self._enters)
assert len(inputs_stacked) == len(self._enters)
loop_var = parent_pfor.loop_var
loop_len = array_ops.size(indices)
pfor = PFor(
loop_var,
loop_len,
pfor_ops=self._pfor_ops,
all_indices=indices,
all_indices_partitioned=cond_stacked,
fallback_to_while_loop=self._fallback_to_while_loop,
pfor_config=self._pfor_config)
# Map all inputs of Enter nodes in self._direct_enters to their converted
# values.
for enter in self._direct_enters:
enter_input = enter.op.inputs[0]
converted_enter, stacked, is_sparse_stacked = parent_pfor._convert_helper(
enter_input)
# Since these are resources / variants, they should be unstacked.
assert not stacked and not is_sparse_stacked, (enter, converted_enter)
pfor._add_conversion(enter, wrap(converted_enter, False))
# Map all Enter nodes to the inputs.
for enter, inp, stacked in zip(self._enters, inputs, inputs_stacked):
pfor._add_conversion(enter, wrap(inp, stacked))
# Map outputs of Switch and Merge.
for i in range(num_outputs):
wrapped_inp = wrap(inputs[i], inputs_stacked[i])
merge = self._enter_merges[i]
pfor._add_conversion(merge.outputs[0], wrapped_inp)
# Note that second output of Merge is typically not used, except possibly
# as a control dependency. To avoid trying to output the correct value, we
# employ a hack here. We output a dummy invalid value with an incorrect
# dtype. This will allow control dependency to work but if using it as an
# input, it should typically lead to errors during graph construction due
# to dtype mismatch.
# TODO(agarwal): Check in the original graph to see if there are any
# consumers of this Tensor that use it as an input.
pfor._add_conversion(merge.outputs[1],
wrap(constant_op.constant(-1.0), False))
switch = self._exit_switches[i]
# Don't need to worry about switch.output[0] which will feed to Exit node.
pfor._add_conversion(switch.outputs[1], wrapped_inp)
return pfor
def _convert_enter(self, parent_pfor, enter):
"""Converts an Enter node."""
inp, stacked, _ = parent_pfor._convert_helper(enter.op.inputs[0])
control_inputs = []
for x in enter.op.control_inputs:
converted = parent_pfor._convert_helper(x)
if not isinstance(converted, ops.Operation):
converted = converted.t
control_inputs.append(converted)
if control_inputs:
with ops.control_dependencies(control_inputs):
inp = array_ops.identity(inp)
return inp, stacked
def _maybe_stacked(self, cache, inp):
"""Heuristic to figure out if the converting inp leads to a stacked value.
Args:
cache: map from Tensor to boolean indicating stacked/unstacked.
inp: input Tensor.
Returns:
True if `inp` could get stacked. If the function returns False, the
converted value should be guaranteed to be unstacked. If returning True,
it may or may not be stacked.
"""
if inp in cache:
return cache[inp]
if not self.op_is_inside_loop(inp.op):
return False
op = inp.op
output = False
if op.type in [
"Shape",
"Rank",
"ShapeN",
"ZerosLike",
"TensorArrayV3",
"TensorArraySizeV3",
]:
output = False
elif _is_stateful_pfor_op(op):
# This may be fairly aggressive.
output = True
elif op.type == "Exit":
# This may be fairly aggressive.
output = True
else:
for t in op.inputs:
if self._maybe_stacked(cache, t):
output = True
break
cache[inp] = output
return output
def _create_init_values(self, pfor_input):
"""Create arguments passed to converted while_loop."""
with ops.name_scope("while_init"):
loop_len_vector = pfor_input.pfor.loop_len_vector
loop_len = loop_len_vector[0]
num_outputs = len(self._outputs)
inputs = []
maybe_stacked_cache = {}
# Convert all the Enters. Need to do this before checking for stacking
# below.
for i, enter in enumerate(self._enters):
inp, stacked = self._convert_enter(pfor_input.pfor, enter)
inputs.append(inp)
maybe_stacked_cache[enter] = stacked
# Since this enter node is part of the `loop_vars`, it corresponds to an
# output and its preceding switch. We mark this switch's output the same
# stackness, to act as the base case for the logic below. Below, we will
# be going through the body figuring out which inputs might need to be
# stacked and which inputs can safely remain unstacked.
if i < num_outputs:
maybe_stacked_cache[self._exit_switches[i].outputs[1]] = stacked
# Shape invariants for init_values corresponding to self._enters.
input_shape_invariants = []
# TensorArrays for outputs of converted while loop
output_tas = []
# Shape invariants for output TensorArrays.
ta_shape_invariants = []
# List of booleans indicating stackness of inputs, i.e. tensors
# corresponding to self._enters.
inputs_stacked = []
for i, inp in enumerate(inputs):
enter = self._enters[i]
inp_stacked = self._maybe_stacked(maybe_stacked_cache, enter)
# Note that even when an input is unstacked, the body could make it
# stacked. we use a heuristic below to figure out if body may be making
# it stacked.
if i < num_outputs:
body_output = self._body_outputs[i]
if enter.op in self._pfor_ops:
body_output_stacked = self._maybe_stacked(maybe_stacked_cache,
body_output)
else:
# If constructed outside of pfor loop, then the output would not be
# stacked.
body_output_stacked = False
if body_output_stacked and not inp_stacked:
inp = _stack(inp, loop_len_vector).t
inputs[i] = inp
inp_stacked = True
# TODO(agarwal): other attributes for the TensorArray ?
output_tas.append(tensor_array_ops.TensorArray(inp.dtype, loop_len))
ta_shape_invariants.append(tensor_shape.TensorShape(None))
inputs_stacked.append(inp_stacked)
input_shape_invariants.append(tensor_shape.TensorShape(None))
# See documentation for __call__ for the structure of init_values.
init_values = [True, pfor_input.pfor.all_indices] + inputs + output_tas
# TODO(agarwal): try stricter shape invariants
shape_invariants = (
[tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None)] + input_shape_invariants +
ta_shape_invariants)
return init_values, inputs_stacked, shape_invariants
def _process_cond_unstacked(self, conditions, indices, inputs, output_tas):
"""Handles case when condition is unstacked.
Note that all iterations end together. So we don't need to partition the
inputs. When all iterations are done, we write the inputs to the
TensorArrays. Note that we only write to index 0 of output_tas. Since all
iterations end together, they can all be output together.
"""
not_all_done = array_ops.reshape(conditions, [])
new_output_tas = []
# pylint: disable=cell-var-from-loop
for i, out_ta in enumerate(output_tas):
inp = inputs[i]
new_output_tas.append(
tf_cond.cond(not_all_done, lambda: out_ta,
lambda: out_ta.write(0, inp)))
# pylint: enable=cell-var-from-loop
return not_all_done, indices, inputs, new_output_tas
def _process_cond_stacked(self, conditions, indices, inputs, inputs_stacked,
output_tas):
num_outputs = len(self._outputs)
# Compute if all iterations are done.
not_all_done = math_ops.reduce_any(conditions)
conditions_int = math_ops.cast(conditions, dtypes.int32)
# Partition the indices.
done_indices, new_indices = data_flow_ops.dynamic_partition(
indices, conditions_int, 2)
new_inputs = []
new_output_tas = []
for i, (inp, stacked) in enumerate(zip(inputs, inputs_stacked)):
# Partition the inputs.
if stacked:
done_inp, new_inp = data_flow_ops.dynamic_partition(
inp, conditions_int, 2)
else:
# TODO(agarwal): avoid this stacking. See TODO earlier in
# _process_cond_unstacked.
done_inp = _stack(inp, [array_ops.size(done_indices)]).t
new_inp = inp
new_inputs.append(new_inp)
# For iterations that are done, write them to TensorArrays.
if i < num_outputs:
out_ta = output_tas[i]
# Note that done_indices can be empty. done_inp should also be empty in
# that case.
new_output_tas.append(out_ta.scatter(done_indices, done_inp))
return not_all_done, new_indices, new_inputs, new_output_tas
def _process_body(self, pfor_input, inputs_stacked, new_indices, cond_stacked,
new_inputs, not_all_done):
"""Convert the body function."""
def true_fn(control_inputs, body_pfor, body_output, stacked):
"""Converts the body function for all but last iteration.
This essentially converts body_output. Additionally, it needs to handle
any control dependencies on the NextIteration node. So it creates another
Identity node with the converted dependencies.
"""
converted_control_inp = []
for x in control_inputs:
for t in x.outputs:
converted_control_inp.append(body_pfor._convert_helper(t).t)
if stacked:
# Note convert always does the stacking.
output = body_pfor.convert(body_output)
else:
output, convert_stacked, _ = body_pfor._convert_helper(body_output)
assert convert_stacked == stacked, body_output
with ops.control_dependencies(converted_control_inp):
return array_ops.identity(output)
body_pfor = self._init_pfor(pfor_input.pfor, new_indices, cond_stacked,
new_inputs, inputs_stacked)
new_outputs = []
for i, (body_output,
stacked) in enumerate(zip(self._body_outputs, inputs_stacked)):
control_inp = self._next_iter_control_inputs[i]
out_dtype = body_output.dtype
# Note that we want to run the body only if not all pfor iterations are
# done. If all are done, we return empty tensors since these values will
# not be used. Notice that the value returned by the loop is based on
# TensorArrays and not directly on these returned values.
# pylint: disable=cell-var-from-loop
new_output = tf_cond.cond(
not_all_done,
lambda: true_fn(control_inp, body_pfor, body_output, stacked),
lambda: constant_op.constant([], dtype=out_dtype))
# pylint: enable=cell-var-from-loop
new_outputs.append(new_output)
return new_outputs
def __call__(self, pfor_input):
"""Converter for the while_loop.
The conversion of a while_loop is another while_loop.
The arguments to this converted while_loop are as follows:
not_all_done: Boolean scalar Tensor indicating if all the pfor iterations
are done.
indices: int32 1-D Tensor storing the id of the iterations that are not
done.
args: Remaining arguments. These can be divided into 3 categories:
      - The first set of arguments are the tensors that correspond to the
        initial elements of self._enters, i.e. the elements that appear in the
        original while loop's `loop_vars`.
- The second set of arguments are the tensors that correspond to the
remaining elements of self._enters. These are the tensors that directly
enter the original while loop body.
- Finally, the last set of arguments are TensorArrays. These TensorArrays
correspond to the outputs of the original while_loop, i.e. to the
elements in self._outputs. Each TensorArray has `PFor.loop_len`
elements, i.e. the number of pfor iterations. At the end, the i'th
element of each TensorArray will contain the output computed by the
i'th iteration of pfor. Note that elements can be written into these
        tensor arrays in any order, depending on when the corresponding pfor
iteration is done.
If the original while_loop had `k` tensors in its `loop_vars` and its body
directly captured `m` tensors, the `args` will contain `2 * k + m` values.
In each iteration, the while_loop body recomputes the condition for all
active pfor iterations to see which of them are now done. It then partitions
all the inputs and passes them along to the converted body. Values for all
the iterations that are done are written to TensorArrays indexed by the pfor
iteration number. When all iterations are done, the TensorArrays are stacked
to get the final value.
Args:
pfor_input: A PForInput object corresponding to the output of any Exit
node from this while loop.
Returns:
List of converted outputs.
"""
# Create init_values that will be passed to the while_loop.
init_values, inputs_stacked, shape_invariants = self._create_init_values(
pfor_input)
# Note that we use a list as a hack since we need the nested function body
# to set the value of cond_is_stacked. python2.x doesn't support nonlocal
# variables.
cond_is_stacked = [None]
def cond(not_all_done, *_):
return not_all_done
def body(not_all_done, indices, *args):
# See documentation for __call__ for the structure of *args.
num_enters = len(self._enters)
inputs = args[:num_enters]
output_tas = args[num_enters:]
# TODO(agarwal): see which outputs have consumers and only populate the
# TensorArrays corresponding to those. Or do those paths get trimmed out
# from inside the while_loop body?
assert len(inputs) >= len(output_tas)
assert len(inputs) == len(inputs_stacked)
# Convert condition
with ops.name_scope("while_cond"):
# Note that we set cond_stacked to True here. At this point we don't
# know if it could be loop invariant, hence the conservative value is
# to assume stacked.
cond_pfor = self._init_pfor(
pfor_input.pfor,
indices,
cond_stacked=True,
inputs=inputs,
inputs_stacked=inputs_stacked)
conditions, cond_stacked, _ = cond_pfor._convert_helper(self._condition)
cond_is_stacked[0] = cond_stacked
# Recompute the new condition, write outputs of done iterations, and
# partition the inputs if needed.
if not cond_stacked:
(not_all_done, new_indices, new_inputs,
new_output_tas) = self._process_cond_unstacked(conditions, indices,
inputs, output_tas)
else:
(not_all_done, new_indices, new_inputs,
new_output_tas) = self._process_cond_stacked(conditions, indices,
inputs, inputs_stacked,
output_tas)
# Convert body
with ops.name_scope("while_body"):
# Compute the outputs from the body.
new_outputs = self._process_body(pfor_input, inputs_stacked,
new_indices, cond_stacked, new_inputs,
not_all_done)
# Note that the first num_outputs new values of inputs are computed using
# the body. Rest of them were direct Enters into the condition/body and
# the partitioning done earlier is sufficient to give the new value.
num_outputs = len(self._outputs)
new_args = ([not_all_done, new_indices] + new_outputs +
list(new_inputs[num_outputs:]) + new_output_tas)
return tuple(new_args)
while_outputs = while_loop.while_loop(
cond, body, init_values, shape_invariants=shape_invariants)
output_tas = while_outputs[-len(self._outputs):]
outputs = []
assert cond_is_stacked[0] is not None
for inp_stacked, ta in zip(inputs_stacked, output_tas):
if cond_is_stacked[0]:
outputs.append(wrap(ta.stack(), True))
else:
# Note that if while_loop condition is unstacked, all iterations exit at
# the same time and we wrote those outputs in index 0 of the tensor
# array.
outputs.append(wrap(ta.read(0), inp_stacked))
return outputs
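# Illustrative sketch (not part of the converter, assuming `import tensorflow
# as tf`): the machinery above is what allows vectorizing loop bodies that
# themselves contain a while_loop with a data-dependent trip count, e.g.
#
#   def loop_body(x):
#     # Keep doubling x until it exceeds 10; the trip count depends on x.
#     return tf.while_loop(lambda v: v < 10., lambda v: v * 2., [x])[0]
#
#   ys = tf.vectorized_map(loop_body, tf.constant([1., 3., 7.]))
#
# Different pfor iterations may finish after different numbers of steps, which
# is why the converted loop tracks the indices that are still active and
# scatters finished iterations into TensorArrays.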
class ConversionNotImplementedError(Exception):
pass
class _PforInput:
"""Input object passed to registered pfor converters."""
__slots__ = ["pfor", "_op", "_inputs"]
def __init__(self, pfor, op, inputs):
"""Creates a _PforInput object.
Args:
pfor: PFor converter object.
op: the Operation object that is being converted.
inputs: list of WrappedTensor objects representing converted values of the
inputs of `op`.
"""
self.pfor = pfor
self._op = op
self._inputs = inputs
def stack_inputs(self, stack_indices=None, tile_variants=False):
"""Stacks unstacked inputs at `stack_indices`.
Args:
stack_indices: indices of inputs at which stacking is done. If None,
stacking is done at all indices.
tile_variants: If True, affected indices which have a variant dtype will
be tiled after this operation to match the expected shape of a
vectorized tensor. Variants generally need to be un-tiled when they are
inputs to operations and tiled when returned.
"""
if stack_indices is None:
stack_indices = range(len(self._inputs))
length = self.pfor.loop_len_vector
for i in stack_indices:
inp = self._inputs[i]
is_variant = inp.t.dtype == dtypes.variant
if not inp.is_stacked:
self._inputs[i] = _stack(inp.t, length)
if tile_variants and is_variant:
self._inputs[i] = wrap(
_tile_variant_with_length(self._inputs[i].t, length), True)
elif not tile_variants and is_variant:
self._inputs[i] = wrap(_untile_variant(self._inputs[i].t), True)
def expanddim_inputs_for_broadcast(self):
"""Reshapes stacked inputs to prepare them for broadcast.
Since stacked inputs have an extra leading dimension, automatic broadcasting
rules could incorrectly try to expand dimensions before that leading
dimension. To avoid that, we reshape these stacked inputs to the maximum
rank they will need to be broadcasted to.
"""
if not self._inputs:
return
# Find max rank
def _get_rank(x):
rank = array_ops.rank(x.t)
if not x.is_stacked:
rank += 1
return rank
ranks = [_get_rank(x) for x in self._inputs]
max_rank = ranks[0]
for rank in ranks[1:]:
max_rank = math_ops.maximum(rank, max_rank)
for i, inp in enumerate(self._inputs):
if inp.is_stacked:
shape = array_ops.shape(inp.t)
rank_diff = array_ops.reshape(max_rank - ranks[i], [1])
ones = array_ops.tile([1], rank_diff)
new_shape = array_ops.concat([shape[:1], ones, shape[1:]], axis=0)
self._inputs[i] = wrap(array_ops.reshape(inp.t, new_shape), True)
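  # Illustrative shape example (sketch only, with N the pfor loop length and
  # hypothetical stacked inputs): given
  #   a = wrap(tf.ones([N, 4]), True)
  #   b = wrap(tf.ones([N, 3, 4]), True)
  # broadcasting a.t directly against b.t would misalign the leading N axis of
  # `a` with the 3 axis of `b`. The reshape above turns a.t into shape
  # [N, 1, 4], so broadcasting only happens over the per-iteration dimensions,
  # mirroring how `a` and `b` would broadcast inside a single loop iteration.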
@property
def inputs(self):
return self._inputs
@property
def num_inputs(self):
return len(self._inputs)
def input(self, index):
assert len(self._inputs) > index, (index, self._inputs)
return self._inputs[index]
def stacked_input(self, index):
t, is_stacked, _ = self.input(index)
if not is_stacked:
op_type = self.op_type
op_def = getattr(self._op, "op_def", None)
if op_def is None:
input_name = "at index %d" % index
else:
input_name = "\"%s\"" % op_def.input_arg[index].name
raise ConversionNotImplementedError(
f"Input {input_name} of op '{op_type}' expected to be not loop "
"invariant.")
return t
def unstacked_input(self, index):
t, is_stacked, _ = self.input(index)
if is_stacked:
op_type = self.op_type
op_def = getattr(self._op, "op_def", None)
if op_def is None:
input_name = "at index %d" % index
else:
input_name = "\"%s\"" % op_def.input_arg[index].name
raise ConversionNotImplementedError(
f"Input {input_name} of op '{op_type}' expected to be loop "
"invariant.")
return t
@property
def op(self):
return self._op
@property
def op_type(self):
return self._op.type
def get_attr(self, attr):
return self._op.get_attr(attr)
@property
def outputs(self):
return self._op.outputs
def output(self, index):
assert index < len(self._op.outputs)
return self._op.outputs[index]
_pfor_converter_registry = {}
class RegisterPFor:
"""Utility to register converters for pfor.
Usage:
@RegisterPFor(foo_op_type)
def _foo_converter(pfor_input):
...
The above will register conversion function `_foo_converter` for handling
conversion of `foo_op_type`. These converters are called during vectorization
of a `pfor` loop body. For each operation node in this loop body,
the vectorization process will call the converter corresponding to the
operation type of the node.
During conversion, the registered function will be called with a single
  argument `pfor_input`, of type `_PforInput`, which will contain state needed
for the conversion. When the converter is called for a node, all its inputs
should already have been converted and these converted values are stored in
`pfor_input.inputs`. This registered function should output a list of
WrappedTensor objects with the same length as the number of outputs of the
node being converted. If the node had zero outputs, then it should return an
ops.Operation object. These new sets of nodes should implement the
functionality of running that operation for the number of iterations specified
by `pfor_input.pfor.loop_len_vector[0]` where the inputs of the node for each
  iteration are picked from `pfor_input.inputs`.
  One tricky aspect of the conversion process is keeping track of, and
  leveraging, the loop invariance of computation. Each converted input is a
WrappedTensor which indicates whether the input was loop invariant or not. If
the converted value is loop invariant, its rank should match the rank of the
corresponding tensor in the loop body, else its rank is larger by 1. The
converter should look at the loop invariance of the inputs and generate new
nodes based on that. Note that the converter will not be called if all inputs
are loop invariant and the operation is not stateful. The converter should
determine if its own output is loop invariant and `wrap` its output
accordingly.
Example:
Here, the converter is trying to convert a Reshape node in the loop body. This
node will have two inputs: the tensor to reshape, and the new shape. The
example here only handles the case where the shape is loop invariant.
@RegisterPFor("Reshape")
def _convert_reshape(pfor_input):
# We assume that input is not loop invariant. Call to `stacked_input`
# asserts that and returns the converted value. This value will have a rank
# larger by 1 compared to the rank of the input in the loop body.
t = pfor_input.stacked_input(0)
# We assume that shape input is loop invariant. Call to `unstacked_input`
# asserts that and returns the converted value.
shape = pfor_input.unstacked_input(1)
# We compute `new_shape` by prepending the number of iterations to the
# original shape.
new_shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape],
axis=0)
# The vectorized output involves reshaping the converted input `t` using
# `new_shape`.
new_output = array_ops.reshape(t, new_shape)
# The converted output is marked as not loop invariant using the call to
# wrap.
return wrap(new_output, True)
"""
def __init__(self, op_type):
"""Creates an object to register a converter for op with type `op_type`."""
self.op_type = op_type
def __call__(self, converter):
name = self.op_type
assert name not in _pfor_converter_registry, "Re-registering %s " % name
_pfor_converter_registry[name] = converter
return converter
class RegisterPForWithArgs(RegisterPFor):
"""Utility to register converters for pfor.
Usage:
  @RegisterPForWithArgs(foo_op_type, foo=value, ....)
def _foo_converter(pfor_input, foo=None, ....):
...
See RegisterPFor for details on the conversion function.
`RegisterPForWithArgs` allows binding extra arguments to the
conversion function at registration time.
"""
def __init__(self, op_type, *args, **kw_args):
super(RegisterPForWithArgs, self).__init__(op_type)
self._args = args
self._kw_args = kw_args
def __call__(self, converter):
def _f(pfor_input):
return converter(pfor_input, self.op_type, *self._args, **self._kw_args)
super(RegisterPForWithArgs, self).__call__(_f)
return converter
# TODO(agarwal): call raw_ops instead of calling these low level routines.
def _create_op(op_type, inputs, op_dtypes, attrs=None):
"""Utility to create an op."""
op = ops.get_default_graph().create_op(
op_type, inputs, op_dtypes, attrs=attrs, compute_device=True)
flat_attrs = []
# The tape expects an alternating flat list of names and attribute values.
  # Guard against the documented default of attrs=None.
  for a in attrs or {}:
flat_attrs.append(str(a))
flat_attrs.append(op.get_attr(str(a)))
execute.record_gradient(op_type, op.inputs, tuple(flat_attrs), op.outputs[:])
return op
WrappedTensor = collections.namedtuple("WrappedTensor",
["t", "is_stacked", "is_sparse_stacked"])
"""Wrapper around the result of a Tensor conversion.
The additional fields are useful for keeping track of the conversion state as
data flows through the ops in the loop body. For every op whose output is a
Tensor, its converter should return either a WrappedTensor or a list of
WrappedTensors.
Args:
t: The converted tensor
is_stacked: True if the tensor is stacked, i.e. represents the results of all
the iterations of the loop, where each row i of the tensor corresponds to
that op's output on iteration i of the loop. False if the tensor is not
    stacked, i.e. represents the result of the op for a single iteration of
the loop, where the result does not vary between iterations.
is_sparse_stacked: True if the tensor corresponds to a component tensor
(indices, values, or dense_shape) of a sparse tensor, and has been logically
stacked via a sparse conversion.
"""
def wrap(tensor, is_stacked=True, is_sparse_stacked=False):
"""Helper to create a WrappedTensor object."""
assert isinstance(is_stacked, bool)
assert isinstance(is_sparse_stacked, bool)
assert isinstance(tensor, ops.Tensor)
assert not is_sparse_stacked or is_stacked, ("If the wrapped tensor is "
"stacked via a sparse "
"conversion, it must also be "
"stacked.")
return WrappedTensor(tensor, is_stacked, is_sparse_stacked)
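# Illustrative usage (sketch only, assuming `import tensorflow as tf`): a
# converter whose result varies across the pfor iterations returns a stacked
# value with an extra leading dimension equal to the loop length, while a loop
# invariant result keeps its original rank, e.g.
#
#   stacked = wrap(tf.zeros([8, 2, 3]), True)     # 8 = number of iterations
#   invariant = wrap(tf.zeros([2, 3]), False)     # same value every iteration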
def _wrap_and_tile_variants(tensor, length):
if tensor.dtype == dtypes.variant:
tensor = _tile_variant_with_length(tensor, length)
return wrap(tensor)
def _fallback_converter(pfor_input, root_cause="", warn=False):
msg = ("Using a while_loop for converting "
f"{pfor_input.op_type} cause {root_cause}")
if warn:
logging.warning(msg)
else:
logging.debug(msg)
output_dtypes = [x.dtype for x in pfor_input.outputs]
iter_vec = pfor_input.pfor.loop_len_vector
# Use constant value if available, so that output shapes are static.
iter_vec_value = tensor_util.constant_value(iter_vec)
if iter_vec_value is not None:
iters = iter_vec_value[0].item()
else:
iters = iter_vec[0]
def while_body(i, *ta_list):
"""Body of while loop."""
inputs = [
x[i, ...] if stacked else x for x, stacked, _ in pfor_input.inputs
]
op_outputs = _create_op(
pfor_input.op_type,
inputs,
output_dtypes,
attrs=pfor_input.op.node_def.attr).outputs
outputs = []
# TODO(agarwal): Add tf.debugging asserts to check that the shapes across
# the different iterations are the same.
for out, ta in zip(op_outputs, ta_list):
assert isinstance(out, ops.Tensor)
outputs.append(ta.write(i, out))
return tuple([i + 1] + outputs)
ta_list = while_loop.while_loop(
lambda i, *ta: i < iters, while_body, [0] +
[tensor_array_ops.TensorArray(dtype, iters) for dtype in output_dtypes
])[1:]
return tuple([wrap(ta.stack(), True) for ta in ta_list])
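# Rough intuition for the fallback above (sketch only; the names below are
# purely illustrative): it builds a sequential while_loop that runs the
# original kernel once per pfor iteration and stacks the per-iteration outputs,
# similar in spirit to
#
#   outputs = tf.map_fn(single_iteration_fn, stacked_inputs)
#
# Correctness is preserved, but the performance benefit of vectorization is
# lost, hence the warning/debug message.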
class PForConfig:
"""A configuration object used to communicate with loop body function."""
def __init__(self):
# This may be set to the number of iterations.
self._maybe_iters = None
# Map from reduction node, created by `reduce`, to the bundle of reduction
# function and arguments.
self._reduce_map = {}
def _has_reductions(self):
"""True if some reductions where performed by loop body."""
return len(self._reduce_map)
def _set_iters(self, iters):
"""Set number of pfor iterations."""
if isinstance(iters, ops.Tensor):
iters = tensor_util.constant_value(iters)
self._maybe_iters = iters
def reduce(self, fn, *args):
"""Performs reduction `fn` on `args` vectorized across pfor iterations.
Note that `fn` is traced once inside the loop function context. Hence any
captures or side-effects will happen in that context. Call to the traced
version of `fn` happens during the construction of the vectorized code.
Note that this currently may not work inside a control flow construct.
Args:
fn: a reduction function. It will be called with arguments that have the
same structure as *args but with individual values whose rank may be
higher by 1 since they represent loop invariant vectorized versions of
the corresponding Tensors in *args.
*args: unvectorized Tensors.
Returns:
The result of running `fn` on the vectorized versions of `*args`. These
outputs will be available as loop invariant values to all the iterations.
"""
assert not context.executing_eagerly()
# Creates a concrete function that will be used for reduction.
tensor_specs = []
for arg in args:
if not isinstance(arg, ops.Tensor):
raise ValueError(f"Got a non-Tensor argument {arg} in reduce.")
batched_shape = tensor_shape.TensorShape([self._maybe_iters
]).concatenate(arg.shape)
tensor_specs.append(
tensor_spec.TensorSpec(shape=batched_shape, dtype=arg.dtype))
concrete_function = def_function.function(fn).get_concrete_function(
*tensor_specs)
    # Creates PlaceholderWithDefault and IdentityN nodes corresponding to the
# reduction.
pl_outputs = []
with ops.control_dependencies(args):
for output in concrete_function.outputs:
if not isinstance(output, ops.Tensor):
raise ValueError(f"Got a non-Tensor output {output} while running "
"reduce.")
# Note that we use placeholder_with_default just to make XLA happy since
# it does not like placeholder ops.
if output.shape.is_fully_defined():
dummy = array_ops.zeros(output.shape.as_list(), dtype=output.dtype)
pl_outputs.append(
array_ops.placeholder_with_default(dummy, shape=output.shape))
else:
# TODO(agarwal): support case when under XLA and output.shape is not
# fully defined.
pl_outputs.append(
array_ops.placeholder(output.dtype, shape=output.shape))
reduction_op = array_ops.identity_n(pl_outputs)[0].op
self._reduce_map[reduction_op] = (concrete_function, args)
if len(reduction_op.outputs) == 1:
return reduction_op.outputs[0]
else:
return tuple(reduction_op.outputs)
# TODO(agarwal): handle reductions inside control flow constructs.
def reduce_concat(self, x):
"""Performs a concat reduction on `x` across pfor iterations.
Note that this currently may not work inside a control flow construct.
Args:
x: an unvectorized Tensor.
Returns:
A Tensor that has rank one higher than `x`. The value is the vectorized
version of `x`, i.e. stacking the value of `x` across different pfor
iterations.
"""
return self.reduce(lambda y: y, x)
def reduce_mean(self, x):
"""Performs a mean reduction on `x` across pfor iterations.
Note that this currently may not work inside a control flow construct.
Args:
x: an unvectorized Tensor.
Returns:
A Tensor that has same rank as `x`. The value is the mean of the values
of `x` across the pfor iterations.
"""
return self.reduce(lambda y: math_ops.reduce_mean(y, axis=0), x)
def reduce_sum(self, x):
"""Performs a sum reduction on `x` across pfor iterations.
Note that this currently may not work inside a control flow construct.
Args:
x: an unvectorized Tensor.
Returns:
A Tensor that has same rank as `x`. The value is the sum of the values
of `x` across the pfor iterations.
"""
return self.reduce(lambda y: math_ops.reduce_sum(y, axis=0), x)
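  # Illustrative usage (sketch only; `data` and the exact wiring of
  # `pfor_config` into the loop body are assumptions, see the pfor entry points
  # that construct this object):
  #
  #   def loop_fn(i, pfor_config):
  #     x = tf.gather(data, i)
  #     mean_x = pfor_config.reduce_mean(x)   # loop invariant across iterations
  #     return x - mean_x
  #
  # The reduce_* helpers let a per-iteration body consume a statistic computed
  # across all pfor iterations.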
def _lookup_reduction(self, t):
"""Lookups Tensor `t` in the reduction maps."""
assert isinstance(t, ops.Tensor), t
return self._reduce_map.get(t.op)
class PFor:
"""Implementation of rewrite of parallel-for loops.
This class takes a DAG or a set of DAGs representing the body of a
parallel-for loop, and adds new operations to the graph that implements
functionality equivalent to running that loop body for a specified number of
iterations. This new set of nodes may or may not use a tensorflow loop
construct.
The process of conversion does not delete or change any existing operations.
It only adds operations that efficiently implement the equivalent
functionality. We refer to the added ops as "converted ops".
The conversion process uses a simple greedy heuristic. It walks the loop body
and tries to express the functionality of running each node in a loop with a
  new set of nodes. When converting an op, several cases are possible:
- The op is not inside the loop body. Hence it can be used as is.
- The op does not depend on the iteration number and is stateless. In this
case, it can be used as is.
- The op is not stateful, and depends on iteration number only through control
dependencies. In this case, we can create a single op with same inputs and
attributes, but with "converted" control dependencies.
- The op is not stateful, and all its inputs are loop invariant. In this
case, similar to above, we can create a single op with same inputs and
attributes, but with "converted" control dependencies.
- The op is stateful or at least one of the inputs is not loop invariant. In
this case, we run the registered converter for that op to create a set of
converted ops. All nodes in the set will have converted control dependencies
corresponding to control dependencies of the original op. If the op returned
multiple outputs, "converted outputs" could be produced by different ops in
this set.
"""
def __init__(self,
loop_var,
loop_len,
pfor_ops,
fallback_to_while_loop,
all_indices=None,
all_indices_partitioned=False,
pfor_config=None,
warn=False):
"""Creates an object to rewrite a parallel-for loop.
Args:
loop_var: ops.Tensor output of a Placeholder operation. The value should
be an int32 scalar representing the loop iteration number.
loop_len: A scalar or scalar Tensor representing the number of iterations
the loop is run for.
pfor_ops: List of all ops inside the loop body.
fallback_to_while_loop: If True, on failure to vectorize an op, a while
loop is used to sequentially execute that op.
all_indices: If not None, an int32 vector with size `loop_len`
representing the iteration ids that are still active. These values
should be unique and sorted. However they may not be contiguous. This is
typically the case when inside a control flow construct which has
partitioned the indices of the iterations that are being converted.
all_indices_partitioned: If True, this object is being constructed from a
control flow construct where not all the pfor iterations are guaranteed
to be active.
pfor_config: PForConfig object used while constructing the loop body.
warn: Whether or not to warn on while loop conversions.
"""
assert isinstance(loop_var, ops.Tensor)
assert loop_var.op.type == "PlaceholderWithDefault"
self._loop_var = loop_var
loop_len_value = tensor_util.constant_value(loop_len)
if loop_len_value is not None:
loop_len = loop_len_value
self._loop_len_vector = ops.convert_to_tensor([loop_len])
else:
self._loop_len_vector = array_ops.reshape(loop_len, [1])
self._all_indices_partitioned = all_indices_partitioned
if all_indices_partitioned:
assert all_indices is not None
self.all_indices = (
math_ops.range(loop_len) if all_indices is None else all_indices)
self._conversion_map = object_identity.ObjectIdentityDictionary()
self._conversion_map[loop_var] = wrap(self.all_indices, True)
self._pfor_ops = set(pfor_ops)
self._pfor_op_ids = set(x._id for x in pfor_ops)
self._fallback_to_while_loop = fallback_to_while_loop
self._warn = warn
self._pfor_config = pfor_config
def op_is_inside_loop(self, op):
"""True if op was created inside the pfor loop body."""
assert isinstance(op, ops.Operation)
# Note that we use self._pfor_op_ids for the check and not self._pfor_ops
    # since it appears the TensorFlow API could return different Python
# objects representing the same Operation node.
return op._id in self._pfor_op_ids
def _convert_sparse(self, y):
"""Returns the converted value corresponding to SparseTensor y.
For SparseTensors, instead of stacking the component tensors separately,
resulting in component tensors with shapes (N, m, rank), (N, m), and (N,
rank) respectively for indices, values, and dense_shape (where N is the loop
length and m is the number of sparse tensor values per loop iter), we want
to logically stack the SparseTensors, to create a SparseTensor whose
components are size (N * m, rank + 1), (N * m, ), and (rank + 1,)
respectively.
Here, we try to get the conversion of each component tensor.
If the tensors are stacked via a sparse conversion, return the resulting
SparseTensor composed of the converted components. Otherwise, the component
tensors are either unstacked or stacked naively. In the latter case, we
unstack the component tensors to reform loop_len SparseTensor elements,
then correctly batch them.
The unstacked tensors must have the same rank. Each dimension of each
SparseTensor will expand to be the largest among all SparseTensor elements
for that dimension. For example, if there are N SparseTensors of rank 3
being stacked, with N dense shapes, where the i_th shape is (x_i, y_i, z_i),
the new dense shape will be (N, max_i(x_i), max_i(y_i), max_i(z_i)).
Args:
y: A tf.sparse.SparseTensor.
Returns:
A tf.sparse.SparseTensor that is the converted value corresponding to y.
"""
outputs = [
self._convert_helper(t) for t in (y.indices, y.values, y.dense_shape)
]
assert all(isinstance(o, WrappedTensor) for o in outputs)
if all(w.is_sparse_stacked for w in outputs):
return sparse_tensor.SparseTensor(*[w.t for w in outputs])
assert not any(w.is_sparse_stacked for w in outputs), (
"Error converting SparseTensor. All components should be logically "
"stacked, or none.")
# If component tensors were not sparsely stacked, they are either unstacked
# or stacked without knowledge that they are components of sparse tensors.
# In this case, we have to restack them.
return self._restack_sparse_tensor_logically(
*[self._unwrap_or_tile(w) for w in outputs])
def _restack_sparse_tensor_logically(self, indices, values, shape):
sparse_tensor_rank = indices.get_shape().dims[-1].value
if sparse_tensor_rank is not None:
sparse_tensor_rank += 1
def fn(args):
res = gen_sparse_ops.serialize_sparse(
args[0], args[1], args[2], out_type=dtypes.variant)
return res
# Applies a map function to the component tensors to serialize each
# sparse tensor element and batch them all, then deserializes the batch.
# TODO(rachelim): Try to do this without map_fn -- add the right offsets
# to shape and indices tensors instead.
result = map_fn.map_fn(fn, [indices, values, shape], dtype=dtypes.variant)
return sparse_ops.deserialize_sparse(
result, dtype=values.dtype, rank=sparse_tensor_rank)
def _unwrap_or_tile(self, wrapped_tensor):
"""Given a wrapped tensor, unwrap if stacked. Otherwise, tiles it."""
output, is_stacked = wrapped_tensor.t, wrapped_tensor.is_stacked
if is_stacked:
return output
else:
return _stack(output, self._loop_len_vector).t
def convert(self, y):
"""Returns the converted value corresponding to y.
Args:
      y: A ops.Tensor or a ops.Operation object. If the latter, y should not
        have any outputs.
Returns:
If y does not need to be converted, it returns y as is. Else it returns
the "converted value" corresponding to y.
"""
if y is None:
return None
if isinstance(y, sparse_tensor.SparseTensor):
return self._convert_sparse(y)
assert isinstance(y, (ops.Tensor, ops.Operation)), y
output = self._convert_helper(y)
if isinstance(output, WrappedTensor):
assert isinstance(y, ops.Tensor)
return self._unwrap_or_tile(output)
else:
assert isinstance(y, ops.Operation)
assert not y.outputs
assert isinstance(output, ops.Operation)
return output
def _was_converted(self, t):
"""True if t is not a conversion of itself."""
converted_t = self._conversion_map[t]
return converted_t.t is not t
def _add_conversion(self, old_output, new_output):
assert isinstance(old_output, (ops.Tensor, ops.Operation)), old_output
assert isinstance(new_output, (WrappedTensor, ops.Operation)), new_output
self._conversion_map[old_output] = new_output
def _convert_reduction(self, y):
# Handle reductions.
if self._pfor_config is None or isinstance(y, ops.Operation):
return None
reduction = self._pfor_config._lookup_reduction(y)
if reduction is None:
return None
(reduction_fn, reduction_args) = reduction
batched_args = []
for reduction_arg in reduction_args:
assert isinstance(reduction_arg, ops.Tensor), reduction_arg
# Tensor being reduced should already be converted due to a control
# dependency on the created placeholder.
# Note that in cases where reduction_arg is in an outer context, one
# needs to locate the corresponding Enter node and use that to lookup
# the conversion.
# TODO(agarwal): handle reductions inside control flow constructs.
assert reduction_arg in self._conversion_map, (
"Unable to handle reduction of %s, possibly as it was used "
"inside a control flow construct. Note that reductions across "
"pfor iterations are currently not supported inside control flow "
"constructs." % reduction_arg)
batched_arg = self._conversion_map[reduction_arg]
batched_args.append(self._unwrap_or_tile(batched_arg))
outputs = reduction_fn(*batched_args)
return [wrap(output, False) for output in nest.flatten(outputs)]
def _convert_helper(self, op_or_tensor):
stack = collections.deque([op_or_tensor])
while stack:
y = stack[0]
if y in self._conversion_map:
assert isinstance(self._conversion_map[y],
(WrappedTensor, ops.Operation))
stack.popleft()
continue
if isinstance(y, ops.Operation):
assert not y.outputs, (
"We only support converting Operation objects with no outputs. "
"Got %s", y)
y_op = y
else:
assert isinstance(y, ops.Tensor), y
y_op = y.op
is_while_loop = y_op.type == "Exit"
if is_while_loop:
while_op = WhileOp(
y, pfor_ops=self._pfor_ops,
fallback_to_while_loop=self.fallback_to_while_loop,
pfor_config=self._pfor_config)
is_inside_loop = while_op.is_inside_loop
# If all nodes in the while_loop graph were created inside the pfor, we
# treat the whole loop subgraph as a single op (y_op) and try to convert
# it. For while_loops that are created completely or partially outside,
# we treat them as external and should be able to simply return the Exit
# node output as is without needing any conversion. Note that for
# while_loops that are partially constructed inside, we assume they will
# be loop invariant. If that is not the case, it will create runtime
# errors since the converted graph would depend on the self._loop_var
# placeholder.
if is_inside_loop:
y_op = while_op
else:
is_inside_loop = self.op_is_inside_loop(y_op)
# If this op was not created inside the loop body, we will return as is.
# 1. Convert inputs and control inputs.
def _add_to_stack(x):
if x not in self._conversion_map:
stack.appendleft(x)
return True
else:
return False
if is_inside_loop:
added_to_stack = False
for inp in y_op.inputs:
added_to_stack |= _add_to_stack(inp)
for cinp in y_op.control_inputs:
if cinp.outputs:
for t in cinp.outputs:
added_to_stack |= _add_to_stack(t)
else:
added_to_stack |= _add_to_stack(cinp)
if added_to_stack:
continue
converted_inputs = [self._conversion_map[inp] for inp in y_op.inputs]
some_input_converted = any(self._was_converted(x) for x in y_op.inputs)
some_input_stacked = any(x.is_stacked for x in converted_inputs)
converted_control_ops = set()
some_control_input_converted = False
for cinp in y_op.control_inputs:
if cinp.outputs:
for t in cinp.outputs:
converted_t = self._conversion_map[t]
if self._was_converted(t):
some_control_input_converted = True
converted_control_ops.add(converted_t.t.op)
else:
converted_cinp = self._conversion_map[cinp]
assert isinstance(converted_cinp, ops.Operation)
if converted_cinp != cinp:
some_control_input_converted = True
converted_control_ops.add(converted_cinp)
converted_control_ops = list(converted_control_ops)
is_stateful = _is_stateful_pfor_op(y_op)
else:
converted_inputs = []
converted_control_ops = []
logging.vlog(3, "converting op:%s\ninputs:%s\ncontrol_inputs:%s", y_op,
converted_inputs, converted_control_ops)
# 2. Convert y_op
      # If converting a while_loop, we let the while_loop converter deal with
# putting the control dependencies appropriately.
control_dependencies = [] if is_while_loop else converted_control_ops
with ops.control_dependencies(control_dependencies), ops.name_scope(
y_op.name + "/pfor/"), ops.get_default_graph()._original_op(y_op):
# Op is a placeholder for a reduction.
reduce_output = self._convert_reduction(y)
if reduce_output is not None:
new_outputs = reduce_output
# None of the inputs and control inputs were converted.
elif ((not is_inside_loop or
(not is_stateful and not some_input_converted and
not some_control_input_converted)) and
y.graph == ops.get_default_graph()):
if y is y_op:
assert not isinstance(y_op, WhileOp)
new_outputs = y_op
else:
new_outputs = [wrap(x, False) for x in y_op.outputs]
elif not (is_stateful or is_while_loop or some_input_stacked):
# All inputs are unstacked or unconverted but some control inputs are
# converted.
# TODO(rachelim): Handle the case where some inputs are sparsely
# stacked (i.e. any(x.is_sparse_stacked for x in converted_inputs))
new_op = _create_op(y_op.type, [x.t for x in converted_inputs],
[x.dtype for x in y_op.outputs],
y_op.node_def.attr)
if y is y_op:
new_outputs = new_op
else:
new_outputs = []
for old_output, new_output in zip(y_op.outputs, new_op.outputs):
handle_data_util.copy_handle_data(old_output, new_output)
new_outputs.append(wrap(new_output, False))
else:
# Either some inputs are not loop invariant or op is stateful.
if hasattr(y_op, "pfor_converter"):
converter = y_op.pfor_converter
else:
converter = _pfor_converter_registry.get(y_op.type, None)
if converter is None:
root_cause = (f"there is no registered converter for this op.")
has_variant_outputs = any(x.dtype == dtypes.variant for x in
y_op.outputs)
has_vectorized_variant_inputs = any(
_is_variant_with_internal_stacking(x) for x in
y_op.inputs)
if (self._fallback_to_while_loop and not has_variant_outputs
and not has_vectorized_variant_inputs):
converter = partial(
_fallback_converter, root_cause=root_cause, warn=self._warn)
else:
message = (f"No pfor vectorization defined for {y_op.type}\n"
f"{y_op}\n inputs: {converted_inputs}.")
if not self._fallback_to_while_loop:
message += ("Consider enabling the fallback_to_while_loop "
"option to pfor, which may run slower.")
raise ValueError(message)
# TODO(rachelim): Handle the case where some inputs are sparsely
# stacked. We should only call the converter if it supports handling
# those inputs.
pfor_inputs = _PforInput(self, y_op, converted_inputs)
try:
try:
new_outputs = converter(pfor_inputs)
except ConversionNotImplementedError as e:
has_vectorized_variant_inputs = any(
_is_variant_with_internal_stacking(x) for x in
y_op.inputs)
if (self._fallback_to_while_loop
and not has_vectorized_variant_inputs):
new_outputs = _fallback_converter(
pfor_inputs, root_cause=str(e))
else:
raise ValueError(str(e)).with_traceback(sys.exc_info()[2])
except Exception as e: # pylint: disable=broad-except
logging.error(
f"Got error while pfor was converting op {y_op} with inputs "
f"{y_op.inputs[:]}\n, converted inputs {pfor_inputs.inputs}\n"
f"Here are the pfor conversion stack traces: {e}")
original_op = y_op
while isinstance(original_op, ops.Operation):
logging.error(
"%s\ncreated at:\n %s", original_op,
" ".join(traceback.format_list(original_op.traceback)))
original_op = original_op._original_op
raise
if isinstance(new_outputs, WrappedTensor):
new_outputs = [new_outputs]
assert isinstance(new_outputs,
(list, tuple, ops.Operation)), new_outputs
logging.vlog(2, f"converted {y_op} {new_outputs}")
# Insert into self._conversion_map
if y is y_op:
assert isinstance(new_outputs, ops.Operation)
self._add_conversion(y_op, new_outputs)
else:
assert len(y_op.outputs) == len(new_outputs), (y_op, y_op.outputs,
new_outputs)
for old_output, new_output in zip(y_op.outputs, new_outputs):
assert isinstance(new_output, WrappedTensor), (new_output, y, y_op)
assert old_output.dtype == new_output.t.dtype, (new_output, y, y_op)
# Set shape for converted output.
output_shape = old_output.shape
if not new_output.is_sparse_stacked:
if new_output.is_stacked:
loop_len = tensor_util.constant_value(self.loop_len_vector)
if loop_len is None:
batch_dim = tensor_shape.TensorShape([None])
else:
batch_dim = tensor_shape.TensorShape(loop_len)
output_shape = batch_dim.concatenate(output_shape)
if _is_variant_with_internal_stacking(new_output.t):
new_output.t.set_shape([])
else:
new_output.t.set_shape(output_shape)
self._add_conversion(old_output, new_output)
stack.popleft()
return self._conversion_map[op_or_tensor]
@property
def loop_len_vector(self):
"""Returns a single element vector whose value is number of iterations."""
return self._loop_len_vector
@property
def loop_var(self):
"""Returns placeholder loop variable."""
return self._loop_var
@property
def pfor_ops(self):
return self._pfor_ops
@property
def pfor_config(self):
return self._pfor_config
@property
def all_indices_partitioned(self):
"""all_indices_partitioned property.
Returns:
True if we are inside a control flow construct and not all pfor iterations
may be active.
"""
return self._all_indices_partitioned
@property
def fallback_to_while_loop(self):
return self._fallback_to_while_loop
# The code below defines converters for different operations. Please see comment
# for RegisterPFor to see how converters should be defined.
# image_ops
@RegisterPFor("AdjustContrastv2")
def _convert_adjust_contrastv2(pfor_input):
images = pfor_input.stacked_input(0)
contrast_factor = pfor_input.unstacked_input(1)
return wrap(gen_image_ops.adjust_contrastv2(images, contrast_factor), True)
@RegisterPFor("AdjustHue")
def _convert_adjust_hue(pfor_input):
images = pfor_input.stacked_input(0)
delta = pfor_input.unstacked_input(1)
return wrap(gen_image_ops.adjust_hue(images, delta), True)
@RegisterPFor("AdjustSaturation")
def _convert_adjust_saturation(pfor_input):
images = pfor_input.stacked_input(0)
scale = pfor_input.unstacked_input(1)
return wrap(gen_image_ops.adjust_saturation(images, scale), True)
# nn_ops
def _flatten_first_two_dims(x):
"""Merges first two dimensions."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([[-1], old_shape[2:]], axis=0)
return array_ops.reshape(x, new_shape)
def _unflatten_first_dim(x, first_dim):
"""Splits first dimension into [first_dim, -1]."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([first_dim, [-1], old_shape[1:]], axis=0)
return array_ops.reshape(x, new_shape)
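# Illustrative shape round trip (sketch only): with loop length N and a stacked
# activation of shape [N, B, H, W, C],
#   _flatten_first_two_dims:        [N, B, H, W, C] -> [N * B, H, W, C]
#   _unflatten_first_dim(., [N]):   [N * B, H, W, C] -> [N, B, H, W, C]
# This is how the per-example ops below are run as one bigger batch and the
# result is split back into per-iteration outputs.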
def _inputs_with_flattening(pfor_input, input_indices):
"""Stacks and flattens first dim of inputs at indices `input_indices`."""
if input_indices is None:
input_indices = []
pfor_input.stack_inputs(stack_indices=input_indices)
inputs = []
for i in range(pfor_input.num_inputs):
if i in input_indices:
inp = pfor_input.stacked_input(i)
inp = _flatten_first_two_dims(inp)
else:
inp = pfor_input.unstacked_input(i)
inputs.append(inp)
return inputs
@RegisterPForWithArgs("Conv2D", dims=[0])
@RegisterPForWithArgs("DepthToSpace", dims=[0])
@RegisterPForWithArgs("AvgPool", dims=[0])
@RegisterPForWithArgs("AvgPool3D", dims=[0])
@RegisterPForWithArgs("MaxPool", dims=[0])
@RegisterPForWithArgs("MaxPoolV2", dims=[0])
@RegisterPForWithArgs("MaxPool3D", dims=[0])
@RegisterPForWithArgs("MaxPool3DGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPoolGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPoolGradV2", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPool3DGradGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPoolGradGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPoolGradGradV2", dims=[0, 1, 2])
@RegisterPForWithArgs("SoftmaxCrossEntropyWithLogits", dims=[0, 1])
@RegisterPForWithArgs("SparseSoftmaxCrossEntropyWithLogits", dims=[0, 1])
@RegisterPForWithArgs("SpaceToDepth", dims=[0])
def _convert_flatten_batch(pfor_input, op_type, dims):
del op_type
inputs = _inputs_with_flattening(pfor_input, dims)
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
n = pfor_input.pfor.loop_len_vector
outputs = [_unflatten_first_dim(x, n) for x in outputs]
return [wrap(x, True) for x in outputs]
_channel_flatten_input_cache = {}
@RegisterPFor("BatchToSpaceND")
def _convert_batch_to_space_nd(pfor_input):
inp = pfor_input.stacked_input(0)
block_shape = pfor_input.unstacked_input(1)
crops = pfor_input.unstacked_input(2)
inp_shape = array_ops.shape(inp)
n = pfor_input.pfor.loop_len_vector
# Reshape and transpose to move the vectorization axis inside the axes that
# will move to space.
# Reshape to 4D and transpose
block_size = math_ops.reduce_prod(block_shape)
new_shape = [n[0], block_size, inp_shape[1] // block_size, -1]
inp = array_ops.reshape(inp, new_shape)
inp = array_ops.transpose(inp, [1, 0, 2, 3])
# Reshape back to merge the block, vectorization and batch dimension, and
# restore the other dimensions.
new_shape = array_ops.concat([n * inp_shape[1], inp_shape[2:]], axis=0)
inp = array_ops.reshape(inp, new_shape)
# Call batch_to_space and then split the new batch axis.
output = gen_array_ops.batch_to_space_nd(inp, block_shape, crops)
output = _unflatten_first_dim(output, n)
return wrap(output, True)
@RegisterPFor("SpaceToBatchND")
def _convert_space_to_batch_nd(pfor_input):
inp = pfor_input.stacked_input(0)
block_shape = pfor_input.unstacked_input(1)
paddings = pfor_input.unstacked_input(2)
n = pfor_input.pfor.loop_len_vector
inp_shape = array_ops.shape(inp)
inp = _flatten_first_two_dims(inp)
output = gen_array_ops.space_to_batch_nd(inp, block_shape, paddings)
output_shape = array_ops.shape(output)
block_size = math_ops.reduce_prod(block_shape)
new_shape = [block_size, n[0], -1]
output = array_ops.reshape(output, new_shape)
output = array_ops.transpose(output, [1, 0, 2])
new_shape = array_ops.concat(
[n, block_size * inp_shape[1:2], output_shape[1:]], axis=0)
output = array_ops.reshape(output, new_shape)
return wrap(output, True)
def _channel_flatten_input(x, data_format):
"""Merge the stack dimension with the channel dimension.
If S is pfor's stacking dimension, then,
- for SNCHW, we transpose to NSCHW. If N dimension has size 1, the transpose
should be cheap.
- for SNHWC, we transpose to NHWSC.
We then merge the S and C dimension.
Args:
x: ops.Tensor to transform.
data_format: "NCHW" or "NHWC".
Returns:
A 3-element tuple with the transformed value, along with the shape for
reshape and order for transpose required to transform back.
"""
graph = ops.get_default_graph()
cache_key = (graph, x.ref(), data_format)
if cache_key not in _channel_flatten_input_cache:
x_shape = array_ops.shape(x)
if data_format == b"NCHW":
order = [1, 0, 2, 3, 4]
shape = array_ops.concat([x_shape[1:2], [-1], x_shape[3:]], axis=0)
reverse_order = order
else:
order = [1, 2, 3, 0, 4]
shape = array_ops.concat([x_shape[1:4], [-1]], axis=0)
reverse_order = [3, 0, 1, 2, 4]
# Move S dimension next to C dimension.
x = array_ops.transpose(x, order)
reverse_shape = array_ops.shape(x)
# Reshape to merge the S and C dimension.
x = array_ops.reshape(x, shape)
outputs = x, reverse_order, reverse_shape
_channel_flatten_input_cache[cache_key] = outputs
else:
outputs = _channel_flatten_input_cache[cache_key]
return outputs
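# Illustrative shape walkthrough (sketch only) for data_format == "NHWC", with
# S the pfor stack size:
#   x: [S, N, H, W, C]
#   transpose([1, 2, 3, 0, 4]) -> [N, H, W, S, C]
#   reshape                    -> [N, H, W, S * C]
# The returned reverse_shape and reverse_order undo exactly these two steps,
# which is how the FusedBatchNormV3 outputs below are mapped back to
# [S, N, H, W, C].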
# Note that with training=True, running FusedBatchNormV3 on individual examples
# is very different from running FusedBatchNormV3 on a batch of those examples.
# This is because, for the latter case, the operation can be considered as first
# computing the mean and variance over all the examples and then using these
# to scale all those examples. This creates a data dependency between these
# different "iterations" since the inputs to the scaling step depends on the
# statistics coming from all these inputs.
# As with other kernels, the conversion here effectively runs the kernel
# independently for each iteration, and returns outputs by stacking outputs from
# each of those iterations.
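# Illustrative sketch of that difference (not used by the converter), assuming
# `import tensorflow as tf` and a hypothetical stacked input `x` of shape
# [N, B, H, W, C]:
#
#   def per_example_bn(e):  # one pfor iteration, e has shape [B, H, W, C]
#     mean, var = tf.nn.moments(e, axes=[0, 1, 2])
#     return tf.nn.batch_normalization(e, mean, var, None, None, 1e-3)
#
#   vectorized = tf.vectorized_map(per_example_bn, x)
#
# generally differs from normalizing the merged [N * B, H, W, C] batch once,
# because the batch statistics are computed over different sets of examples.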
@RegisterPFor("FusedBatchNormV3")
def _convert_fused_batch_norm(pfor_input):
is_training = pfor_input.get_attr("is_training")
# When BatchNorm is used with training=False, mean and variance are provided
# externally and used as is by the op. Thus, we can merge the S and N
# dimensions as we do for regular operations.
# When BatchNorm is used with training=True, mean and variance are computed
# for each channel across the batch dimension (first one). If we merge S and N
# dimensions, mean and variances will be computed over a larger set. So, we
# merge the S and C dimensions instead.
if not is_training:
# We return zeros for batch_mean and batch_variance output. Note that CPU
# and GPU seem to have different behavior for those two outputs. CPU outputs
# zero because these values are not used during inference. GPU outputs
# something, probably real means and variances.
inputs = _inputs_with_flattening(pfor_input, [0])
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
y = outputs[0]
n = pfor_input.pfor.loop_len_vector
y = _unflatten_first_dim(y, n)
mean = pfor_input.unstacked_input(3)
zeros = array_ops.zeros_like(mean)
return [wrap(y, True)] + [wrap(zeros, False)] * 5
pfor_input.stack_inputs()
data_format = pfor_input.get_attr("data_format")
# We merge the first dimension with the "C" dimension, run FusedBatchNormV3,
# and then transpose back.
x = pfor_input.stacked_input(0)
x, reverse_order, reverse_shape = _channel_flatten_input(x, data_format)
# Note that we stack all the other inputs as well so that they are the same
# size as the new size of the channel dimension.
inputs = [x] + [
array_ops.reshape(pfor_input.stacked_input(i), [-1])
for i in range(1, pfor_input.num_inputs)
]
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
y = outputs[0]
y = array_ops.reshape(y, reverse_shape)
y = array_ops.transpose(y, reverse_order)
n = pfor_input.pfor.loop_len_vector
outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
outputs = [y] + outputs
return [wrap(x, True) for x in outputs]
@RegisterPFor("FusedBatchNormGradV3")
def _convert_fused_batch_norm_grad(pfor_input):
pfor_input.stack_inputs()
data_format = pfor_input.get_attr("data_format")
y_backprop = pfor_input.stacked_input(0)
y_backprop, _, _ = _channel_flatten_input(y_backprop, data_format)
x = pfor_input.stacked_input(1)
x, x_reverse_order, x_reverse_shape = _channel_flatten_input(x, data_format)
inputs = [y_backprop, x] + [
array_ops.reshape(pfor_input.stacked_input(i), [-1])
for i in range(2, pfor_input.num_inputs)
]
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
x_backprop = outputs[0]
x_backprop = array_ops.reshape(x_backprop, x_reverse_shape)
x_backprop = array_ops.transpose(x_backprop, x_reverse_order)
n = pfor_input.pfor.loop_len_vector
outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
outputs = [x_backprop] + outputs
return [wrap(output, True) for output in outputs]
@RegisterPForWithArgs("Conv2DBackpropInput", flatten_dims=[2], shape_dim=0)
@RegisterPForWithArgs("AvgPoolGrad", flatten_dims=[1], shape_dim=0)
@RegisterPForWithArgs("AvgPool3DGrad", flatten_dims=[1], shape_dim=0)
def _convert_flatten_batch_shape_input(pfor_input, op_type, flatten_dims,
shape_dim):
del op_type
inputs = _inputs_with_flattening(pfor_input, flatten_dims)
n = pfor_input.pfor.loop_len_vector
# Adjust the `input_sizes` input.
ones = array_ops.ones([array_ops.shape(inputs[shape_dim])[0] - 1],
dtype=n.dtype)
inputs[shape_dim] *= array_ops.concat([n, ones], axis=0)
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
outputs = [_unflatten_first_dim(x, n) for x in outputs]
return [wrap(x, True) for x in outputs]
@RegisterPFor("Conv2DBackpropFilter")
def _convert_conv2d_backprop_filter(pfor_input):
pfor_input.stack_inputs(stack_indices=[2])
inputs, inputs_stacked, _ = pfor_input.input(0)
filter_sizes = pfor_input.unstacked_input(1)
grads = pfor_input.stacked_input(2)
strides = pfor_input.get_attr("strides")
padding = pfor_input.get_attr("padding")
use_cudnn_on_gpu = pfor_input.get_attr("use_cudnn_on_gpu")
data_format = pfor_input.get_attr("data_format")
dilations = pfor_input.get_attr("dilations")
if inputs_stacked:
# TODO(agarwal): Implement this efficiently.
logging.warning("Conv2DBackpropFilter uses a while_loop. Fix that!")
def while_body(i, ta):
inp_i = inputs[i, ...]
grad_i = grads[i, ...]
output = nn_ops.conv2d_backprop_filter(
inp_i,
filter_sizes,
grad_i,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations)
return i + 1, ta.write(i, output)
n = array_ops.reshape(pfor_input.pfor.loop_len_vector, [])
_, ta = while_loop.while_loop(
lambda i, ta: i < n, while_body,
(0, tensor_array_ops.TensorArray(inputs.dtype, n)))
output = ta.stack()
return wrap(output, True)
else:
# We merge the stack dimension with the channel dimension of the gradients
# and pretend we had a larger filter (see change to filter_sizes below).
# Once the filter backprop is computed, we reshape and transpose back
# appropriately.
grads, _, _ = _channel_flatten_input(grads, data_format)
n = pfor_input.pfor.loop_len_vector
old_filter_sizes = filter_sizes
filter_sizes *= array_ops.concat([[1, 1, 1], n], axis=0)
output = nn_ops.conv2d_backprop_filter(
inputs,
filter_sizes,
grads,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations)
new_filter_shape = array_ops.concat([old_filter_sizes[:3], n, [-1]], axis=0)
output = array_ops.reshape(output, new_filter_shape)
output = array_ops.transpose(output, [3, 0, 1, 2, 4])
return wrap(output, True)
def _flatten_with_inner_dim(x, dim, x_rank):
"""Merges the first dim with the specified dim."""
shape = array_ops.shape(x)
x = array_ops.transpose(x,
list(range(1, dim)) + [0] + list(range(dim, x_rank)))
if dim < x_rank - 1:
new_shape_pieces = [shape[1:dim], [-1], shape[dim + 1:]]
else:
new_shape_pieces = [shape[1:dim], [-1]]
new_shape = array_ops.concat(new_shape_pieces, axis=0)
return array_ops.reshape(x, new_shape)
def _unflatten_with_inner_dim(x, dim, x_rank, stack_size):
"""Undoes _flatten_with_inner_dim."""
shape = array_ops.shape(x)
if dim < x_rank - 1:
new_shape_pieces = [shape[:dim], [stack_size], [-1], shape[dim + 1:]]
else:
new_shape_pieces = [shape[:dim], [stack_size], [-1]]
new_shape = array_ops.concat(new_shape_pieces, axis=0)
x = array_ops.reshape(x, new_shape)
dims_permutation = [dim] + list(range(dim)) + list(range(dim + 1, x_rank + 1))
return array_ops.transpose(x, dims_permutation)
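# Illustrative shape example (sketch only), matching the NHWC depthwise conv
# converters below where the stack axis S is folded into the channel axis:
#   _flatten_with_inner_dim(x, dim=4, x_rank=5):
#       [S, B, H, W, C]   -> [B, H, W, S * C]
#   _unflatten_with_inner_dim(y, dim=3, x_rank=4, stack_size=S):
#       [B, H, W, S * C'] -> [S, B, H, W, C']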
@RegisterPFor("DepthwiseConv2dNative")
def _convert_depthwise_conv2d_native(pfor_input):
  # The kernel itself can be vectorized, so folding into the batch dimension
  # does not work. We instead fold into the channel dimension because it is
  # parallel.
stack_size = pfor_input.pfor.loop_len_vector[0]
data_format = pfor_input.get_attr("data_format")
c_dim = 1 if data_format == b"NCHW" else 3
t = _flatten_with_inner_dim(pfor_input.stacked_input(0), c_dim + 1, 5)
kernel = _flatten_with_inner_dim(pfor_input.stacked_input(1), 3, 5)
conv = _create_op(
"DepthwiseConv2dNative", [t, kernel],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs[0]
return wrap(_unflatten_with_inner_dim(conv, c_dim, 4, stack_size), True)
@RegisterPFor("DepthwiseConv2dNativeBackpropInput")
def _convert_depthwise_conv2d_native_backprop_input(pfor_input):
stack_size = pfor_input.pfor.loop_len_vector[0]
input_sizes = pfor_input.unstacked_input(0)
data_format = pfor_input.get_attr("data_format")
c_dim = 1 if data_format == b"NCHW" else 3
  input_sizes_multipliers = [
      constant_op.constant([1] * c_dim, dtype=dtypes.int32), [stack_size]
  ]
  if c_dim < 3:
    input_sizes_multipliers += [
        constant_op.constant([1] * (3 - c_dim), dtype=dtypes.int32)
    ]
  input_sizes *= array_ops.concat(input_sizes_multipliers, axis=0)
kernel = _flatten_with_inner_dim(pfor_input.stacked_input(1), 3, 5)
out_backprop = _flatten_with_inner_dim(
pfor_input.stacked_input(2), c_dim + 1, 5)
result = _create_op(
"DepthwiseConv2dNativeBackpropInput", [input_sizes, kernel, out_backprop],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs[0]
return wrap(_unflatten_with_inner_dim(result, c_dim, 4, stack_size), True)
@RegisterPFor("DepthwiseConv2dNativeBackpropFilter")
def _convert_depthwise_conv2d_native_backprop_filter(pfor_input):
stack_size = pfor_input.pfor.loop_len_vector[0]
data_format = pfor_input.get_attr("data_format")
c_dim = 1 if data_format == b"NCHW" else 3
inputs = _flatten_with_inner_dim(pfor_input.stacked_input(0), c_dim + 1, 5)
filter_sizes = pfor_input.unstacked_input(1)
filter_sizes_multipliers = [
constant_op.constant([1, 1], dtype=dtypes.int32), [stack_size],
constant_op.constant([1], dtype=dtypes.int32)
]
filter_sizes *= array_ops.concat(filter_sizes_multipliers, axis=0)
out_backprop = _flatten_with_inner_dim(
pfor_input.stacked_input(2), c_dim + 1, 5)
result = _create_op(
"DepthwiseConv2dNativeBackpropFilter",
[inputs, filter_sizes, out_backprop],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs[0]
return wrap(_unflatten_with_inner_dim(result, 2, 4, stack_size), True)
@RegisterPForWithArgs("LogSoftmax", gen_nn_ops.log_softmax)
@RegisterPForWithArgs("Softmax", gen_nn_ops.softmax)
def _convert_softmax(pfor_input, op_type, op_func):
del op_type
return wrap(op_func(pfor_input.stacked_input(0)), True)
# array_ops
@RegisterPForWithArgs("Identity", array_ops.identity)
@RegisterPForWithArgs("StopGradient", array_ops.stop_gradient)
@RegisterPForWithArgs("MatrixDiag", array_ops.matrix_diag)
@RegisterPForWithArgs("MatrixDiagPart", array_ops.matrix_diag_part)
@RegisterPForWithArgs("_EagerConst", array_ops.identity)
def _convert_identity(pfor_input, op_type, op_func):
del op_type
return wrap(op_func(*[x.t for x in pfor_input.inputs]), True)
@RegisterPFor("IdentityN")
def _convert_identity_n(pfor_input):
outputs = array_ops.identity_n([x.t for x in pfor_input.inputs])
return [
wrap(out, inp.is_stacked) for out, inp in zip(outputs, pfor_input.inputs)
]
@RegisterPFor("Reshape")
def _convert_reshape(pfor_input):
t = pfor_input.stacked_input(0)
shape = pfor_input.unstacked_input(1)
new_shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0)
return wrap(array_ops.reshape(t, new_shape), True)
@RegisterPFor("Fill")
def _convert_fill(pfor_input):
dims = pfor_input.unstacked_input(0)
value = pfor_input.stacked_input(1)
# Expand the rank of `value`
new_shape = array_ops.concat(
[[-1], array_ops.ones([array_ops.size(dims)], dtype=dtypes.int32)],
axis=0)
value = array_ops.reshape(value, new_shape)
# Compute the new output shape
new_dims = array_ops.concat([pfor_input.pfor.loop_len_vector, dims], axis=0)
# Broadcast
return wrap(array_ops.broadcast_to(value, new_dims), True)
@RegisterPFor("BroadcastTo")
def _convert_broadcast_to(pfor_input):
t = pfor_input.stacked_input(0)
shape = pfor_input.unstacked_input(1)
new_shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0)
# Expand dims of stacked t to broadcast against the new shape.
# TODO(davmre): consider factoring out common code with
# `expanddim_inputs_for_broadcast`, which has similar logic but with
# implicit shapes (of input Tensors) rather than explicit shapes.
rank_diff = array_ops.shape(new_shape)[0] - array_ops.rank(t)
ones = array_ops.tile([1], array_ops.reshape(rank_diff, [1]))
t_shape = array_ops.shape(t)
t_expanded_shape = array_ops.concat([t_shape[:1], ones, t_shape[1:]], axis=0)
return wrap(
array_ops.broadcast_to(array_ops.reshape(t, t_expanded_shape), new_shape),
True)
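# Shape walkthrough (illustrative sketch, not used by the converter above): if
# every pfor iteration broadcasts a [3] vector to [2, 3], the stacked input has
# shape [n, 3] and the target shape is [n, 2, 3]. The rank difference is 1, so
# the input is reshaped to [n, 1, 3] before the final broadcast.
def _example_broadcast_to_rank_alignment():
  t = array_ops.ones([4, 3])  # stacked inputs: n = 4 vectors of length 3.
  new_shape = constant_op.constant([4, 2, 3], dtype=dtypes.int32)
  t_expanded = array_ops.reshape(t, [4, 1, 3])
  return array_ops.broadcast_to(t_expanded, new_shape)  # shape [4, 2, 3]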
@RegisterPFor("ExpandDims")
def _convert_expanddims(pfor_input):
t = pfor_input.stacked_input(0)
dim = pfor_input.unstacked_input(1)
dim += math_ops.cast(dim >= 0, dim.dtype)
return wrap(array_ops.expand_dims(t, axis=dim), True)
@RegisterPForWithArgs("LowerBound", gen_array_ops.lower_bound)
@RegisterPForWithArgs("UpperBound", gen_array_ops.upper_bound)
def _convert_searchsorted(pfor_input, _, op_func):
pfor_input.stack_inputs()
sorted_inputs = _flatten_first_two_dims(pfor_input.stacked_input(0))
values = _flatten_first_two_dims(pfor_input.stacked_input(1))
out_type = pfor_input.get_attr("out_type")
output = op_func(sorted_inputs, values, out_type)
return wrap(
_unflatten_first_dim(output, pfor_input.pfor.loop_len_vector), True)
@RegisterPFor("MatrixBandPart")
def _convert_matrix_band_part(pfor_input):
t = pfor_input.stacked_input(0)
num_lower = pfor_input.unstacked_input(1)
num_upper = pfor_input.unstacked_input(2)
return wrap(
array_ops.matrix_band_part(t, num_lower=num_lower, num_upper=num_upper),
True)
@RegisterPFor("MatrixSetDiag")
def _convert_matrix_set_diag(pfor_input):
pfor_input.stack_inputs()
t = pfor_input.stacked_input(0)
diag = pfor_input.stacked_input(1)
return wrap(array_ops.matrix_set_diag(t, diag), True)
# Registrations for Matrix{Diag,DiagPart,SetDiag}V2-3.
# The input orders defined in the OpKernel and the actual python API are
# different (for compatibility with V1), so we cannot use _convert_identity.
# v2 is not compatible with v3 and is never exposed on the public API.
@RegisterPFor("MatrixDiagV2")
@RegisterPFor("MatrixDiagV3")
def _convert_matrix_diag_v2(pfor_input):
params = {
"diagonal": pfor_input.stacked_input(0),
"k": pfor_input.unstacked_input(1),
"num_rows": pfor_input.unstacked_input(2),
"num_cols": pfor_input.unstacked_input(3),
"padding_value": pfor_input.unstacked_input(4)
}
if pfor_input.op_type == "MatrixDiagV2":
return wrap(array_ops.matrix_diag_v2(**params), True)
params["align"] = pfor_input.get_attr("align")
return wrap(array_ops.matrix_diag(**params), True)
@RegisterPFor("Diag")
def _convert_diag(pfor_input):
diag = pfor_input.stacked_input(0)
if diag.shape.ndims == 2:
# We can use matrix_diag.
return wrap(array_ops.matrix_diag(diag), True)
else:
# It is not clear if we can do better than a while loop here with existing
# kernels.
return _fallback_converter(pfor_input, warn=False)
# See notes for MatrixDiagV2
@RegisterPFor("MatrixDiagPartV2")
@RegisterPFor("MatrixDiagPartV3")
def _convert_matrix_diag_part_v2(pfor_input):
params = {
"input": pfor_input.stacked_input(0),
"k": pfor_input.unstacked_input(1),
"padding_value": pfor_input.unstacked_input(2)
}
if pfor_input.op_type == "MatrixDiagPartV2":
return wrap(array_ops.matrix_diag_part_v2(**params), True)
params["align"] = pfor_input.get_attr("align")
return wrap(array_ops.matrix_diag_part(**params), True)
# See notes for MatrixDiagV2
@RegisterPFor("MatrixSetDiagV2")
@RegisterPFor("MatrixSetDiagV3")
def _convert_matrix_set_diag_v2(pfor_input):
pfor_input.stack_inputs([0, 1])
params = {
"input": pfor_input.stacked_input(0),
"diagonal": pfor_input.stacked_input(1),
"k": pfor_input.unstacked_input(2)
}
if pfor_input.op_type == "MatrixSetDiagV2":
return wrap(array_ops.matrix_set_diag_v2(**params), True)
params["align"] = pfor_input.get_attr("align")
return wrap(array_ops.matrix_set_diag(**params), True)
@RegisterPFor("DiagPart")
def _convert_diag_part(pfor_input):
inp = pfor_input.stacked_input(0)
if inp.shape.ndims == 3:
# We can use matrix_diag_part.
return wrap(array_ops.matrix_diag_part(inp), True)
else:
# It is not clear if we can do better than a while loop here with existing
# kernels.
return _fallback_converter(pfor_input, warn=False)
@RegisterPFor("OneHot")
def _convert_one_hot(pfor_input):
indices = pfor_input.stacked_input(0)
depth = pfor_input.unstacked_input(1)
on_value = pfor_input.unstacked_input(2)
off_value = pfor_input.unstacked_input(3)
axis = pfor_input.get_attr("axis")
if axis >= 0:
axis += 1
return wrap(
array_ops.one_hot(indices, depth, on_value, off_value, axis), True)
@RegisterPFor("Slice")
def _convert_slice(pfor_input):
t = pfor_input.stacked_input(0)
begin, begin_stacked, _ = pfor_input.input(1)
size = pfor_input.unstacked_input(2)
if not begin_stacked:
begin = array_ops.concat([[0], begin], axis=0)
size = array_ops.concat([[-1], size], axis=0)
return wrap(array_ops.slice(t, begin, size), True)
else:
# Handle negative sizes.
#
# If the `begin` entry corresponding to a negative `size` is loop-variant,
# the output would be ragged. This case is not supported. But `size` having
# some negative values and some loop-variant `begin`s is OK (and it's hard
# to tell the difference statically).
original_unstacked_shape = _stack(
array_ops.shape(t)[1:], pfor_input.pfor.loop_len_vector).t
broadcast_size = _stack(size, pfor_input.pfor.loop_len_vector).t
result_shape = array_ops.where(
math_ops.less(broadcast_size, 0),
original_unstacked_shape - begin + broadcast_size + 1, broadcast_size)
result_shape = math_ops.cast(math_ops.reduce_max(result_shape, axis=0),
dtypes.int64)
# Now we enumerate points in the sliced region for each pfor iteration and
# gather them.
cumsize = math_ops.cumprod(result_shape, exclusive=True, reverse=True)
result_num_elements = math_ops.reduce_prod(result_shape)
# Offsets are loop-variant. We first compute loop-invariant gather
# coordinates, then broadcast-add the loop-variant `begin` offsets.
result_base_coordinates = (
math_ops.range(result_num_elements, dtype=dtypes.int64)[:, None]
// cumsize[None, :]) % result_shape[None, :]
result_coordinates = (
begin[:, None, :]
+ math_ops.cast(result_base_coordinates, begin.dtype)[None, :, :])
result_flat = array_ops.gather_nd(params=t, indices=result_coordinates,
batch_dims=1)
result_stacked_shape = array_ops.concat(
[math_ops.cast(pfor_input.pfor.loop_len_vector, result_shape.dtype),
result_shape],
axis=0)
return wrap(array_ops.reshape(result_flat, result_stacked_shape), True)
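# Illustrative sketch (hypothetical, not a registered converter): the
# gather-based approach above with concrete numbers. Each iteration slices a
# length-4 vector with a loop-variant `begin` and size [2]; the per-iteration
# base coordinates [[0], [1]] are broadcast-added to the stacked `begin`.
def _example_stacked_begin_slice():
  t = constant_op.constant([[10, 11, 12, 13], [20, 21, 22, 23]])  # [n=2, 4]
  begin = constant_op.constant([[0], [1]])  # loop-variant begin per iteration
  base = constant_op.constant([[0], [1]])   # coordinates for size = [2]
  coords = begin[:, None, :] + base[None, :, :]  # shape [2, 2, 1]
  # gather_nd with batch_dims=1 picks [10, 11] for iteration 0 and [21, 22]
  # for iteration 1, matching a per-iteration Slice(t_i, begin_i, [2]).
  return array_ops.gather_nd(t, coords, batch_dims=1)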
@RegisterPFor("Tile")
def _convert_tile(pfor_input):
t = pfor_input.stacked_input(0)
multiples = pfor_input.unstacked_input(1)
multiples = array_ops.concat([[1], multiples], 0)
return wrap(array_ops.tile(t, multiples), True)
@RegisterPFor("Pack")
def _convert_pack(pfor_input):
pfor_input.stack_inputs()
axis = pfor_input.get_attr("axis")
if axis >= 0:
axis += 1
return wrap(
array_ops_stack.stack([x.t for x in pfor_input.inputs], axis=axis), True)
@RegisterPFor("Unpack")
def _convert_unpack(pfor_input):
value = pfor_input.stacked_input(0)
axis = pfor_input.get_attr("axis")
if axis >= 0:
axis += 1
num = pfor_input.get_attr("num")
return [wrap(x, True) for x
in array_ops_stack.unstack(value, axis=axis, num=num)]
@RegisterPFor("Pad")
def _convert_pad(pfor_input):
t = pfor_input.stacked_input(0)
paddings = pfor_input.unstacked_input(1)
paddings = array_ops.concat([[[0, 0]], paddings], 0)
return wrap(array_ops.pad(t, paddings, mode="CONSTANT"), True)
@RegisterPFor("PadV2")
def _convert_pad_v2(pfor_input):
t = pfor_input.stacked_input(0)
paddings = pfor_input.unstacked_input(1)
paddings = array_ops.concat([[[0, 0]], paddings], 0)
return wrap(array_ops.pad_v2(t, paddings, mode="CONSTANT"), True)
@RegisterPFor("Split")
def _convert_split(pfor_input):
split_dim = pfor_input.unstacked_input(0)
t = pfor_input.stacked_input(1)
num_split = pfor_input.get_attr("num_split")
split_dim += math_ops.cast(split_dim >= 0, dtypes.int32)
return [wrap(x, True) for x in array_ops.split(t, num_split, axis=split_dim)]
@RegisterPFor("SplitV")
def _convert_split_v(pfor_input):
t = pfor_input.stacked_input(0)
splits = pfor_input.unstacked_input(1)
split_dim = pfor_input.unstacked_input(2)
split_dim += math_ops.cast(split_dim >= 0, dtypes.int32)
return [wrap(x, True) for x in array_ops.split(t, splits, axis=split_dim)]
@RegisterPFor("Squeeze")
def _convert_squeeze(pfor_input):
t = pfor_input.stacked_input(0)
squeeze_dims = pfor_input.get_attr("squeeze_dims")
squeeze_dims = [i + 1 if i >= 0 else i for i in squeeze_dims]
return wrap(array_ops.squeeze(t, axis=squeeze_dims), True)
@RegisterPFor("ReverseV2")
def _convert_reverse(pfor_input):
value = pfor_input.stacked_input(0)
axis = pfor_input.unstacked_input(1)
new_axis = array_ops.where_v2(axis >= 0, axis + 1, axis)
return wrap(gen_array_ops.reverse_v2(value, axis=new_axis), True)
@RegisterPForWithArgs("Transpose", gen_array_ops.transpose)
@RegisterPForWithArgs("ConjugateTranspose", gen_array_ops.conjugate_transpose)
def _convert_transpose(pfor_input, _, op_func):
t = pfor_input.stacked_input(0)
perm = pfor_input.unstacked_input(1)
new_perm = array_ops.concat([[0], perm + 1], axis=0)
return wrap(op_func(t, new_perm), True)
@RegisterPFor("ZerosLike")
def _convert_zeroslike(pfor_input):
t = pfor_input.stacked_input(0)
shape = array_ops.shape(t)[1:]
return wrap(array_ops.zeros(shape, dtype=t.dtype), False)
@RegisterPFor("Gather")
@RegisterPFor("GatherV2")
def _convert_gather(pfor_input):
param, param_stacked, _ = pfor_input.input(0)
indices, indices_stacked, _ = pfor_input.input(1)
batch_dims = pfor_input.get_attr("batch_dims")
op_type = pfor_input.op_type
if op_type == "Gather":
validate_indices = pfor_input.get_attr("validate_indices")
axis = 0
else:
validate_indices = None
# Assume we will never have a Tensor with rank > 2**32.
axis = math_ops.cast(pfor_input.unstacked_input(2), dtypes.int32)
axis_value = tensor_util.constant_value(axis)
if axis_value is not None:
axis = axis_value
if indices_stacked and not param_stacked:
if indices is pfor_input.pfor.all_indices and axis == 0:
param_shape0 = tensor_shape.dimension_value(param.shape[0])
indices_shape0 = tensor_shape.dimension_value(indices.shape[0])
if param_shape0 is not None and indices_shape0 == param_shape0:
# Note that with loops and conditionals, indices may not be contiguous.
# However they will be sorted and unique. So if the shape matches, then
# it must be picking up all the rows of param.
return wrap(param, True)
if batch_dims != 0:
# Convert `batch_dims` to its positive equivalent if necessary.
batch_dims_pos = batch_dims
if batch_dims < 0:
batch_dims_pos += array_ops.rank(indices)
# In order to maintain
# indices.shape[:batch_dims] == params.shape[:batch_dims]
# with stacked indices, we move the first dimension of `indices` to the
# `batch_dims + 1`th position. The (non-batch) index dimensions will be
# inserted into the shape of `output` at the `axis` dimension, which is
# then transposed to the front (below).
order = array_ops.concat([
math_ops.range(1, batch_dims_pos + 1),
[0],
math_ops.range(batch_dims_pos + 1, array_ops.rank(indices))], axis=0)
indices = array_ops.transpose(indices, order)
output = array_ops.gather(
param, indices, validate_indices=validate_indices, axis=axis,
batch_dims=batch_dims)
if axis != 0:
axis = smart_cond.smart_cond(axis < 0,
lambda: axis + array_ops.rank(param),
lambda: ops.convert_to_tensor(axis))
order = array_ops.concat(
[[axis],
math_ops.range(axis),
math_ops.range(axis + 1, array_ops.rank(output))],
axis=0)
output = smart_cond.smart_cond(
math_ops.equal(axis, 0), lambda: output,
lambda: array_ops.transpose(output, order))
return wrap(output, True)
if param_stacked:
pfor_input.stack_inputs(stack_indices=[1])
indices = pfor_input.stacked_input(1)
if isinstance(axis, ops.Tensor):
axis = array_ops.where(axis >= 0, axis + 1, axis)
else:
axis = axis + 1 if axis >= 0 else axis
batch_dims = batch_dims + 1 if batch_dims >= 0 else batch_dims
output = array_ops.gather(param, indices, axis=axis, batch_dims=batch_dims)
return wrap(output, True)
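# Illustrative sketch (hypothetical, not a registered converter): the
# param-stacked case above, with concrete values. Per-iteration
# Gather(params_i, indices_i) is expressed as one batched gather with both
# `axis` and `batch_dims` shifted by one to skip the loop dimension.
def _example_stacked_gather():
  params = constant_op.constant([[10, 11, 12], [20, 21, 22]])  # [n=2, 3]
  indices = constant_op.constant([[2, 0], [1, 1]])             # [n=2, 2]
  # Result is [[12, 10], [21, 21]].
  return array_ops.gather(params, indices, axis=1, batch_dims=1)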
@RegisterPFor("GatherNd")
def _convert_gather_nd(pfor_input):
# TODO(jmenick): Add support for unstacked params.
pfor_input.stack_inputs(stack_indices=[1])
params = pfor_input.stacked_input(0)
indices = pfor_input.stacked_input(1)
stacked_result = array_ops.gather_nd(params, indices, batch_dims=1)
return wrap(stacked_result, True)
@RegisterPFor("ConcatV2")
def _convert_concatv2(pfor_input):
n = pfor_input.num_inputs
pfor_input.stack_inputs(stack_indices=range(n - 1))
axis = pfor_input.unstacked_input(n - 1)
axis += math_ops.cast(axis >= 0, axis.dtype)
return wrap(
array_ops.concat([x.t for x in pfor_input.inputs[:n - 1]], axis=axis),
True)
@RegisterPFor("StridedSlice")
def _convert_strided_slice(pfor_input):
inp = pfor_input.stacked_input(0)
begin = pfor_input.unstacked_input(1)
end = pfor_input.unstacked_input(2)
strides = pfor_input.unstacked_input(3)
begin_mask = pfor_input.get_attr("begin_mask")
end_mask = pfor_input.get_attr("end_mask")
ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
new_axis_mask = pfor_input.get_attr("new_axis_mask")
shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")
begin = array_ops.concat([[0], begin], axis=0)
end = array_ops.concat([[0], end], axis=0)
strides = array_ops.concat([[1], strides], axis=0)
begin_mask = begin_mask << 1 | 1
end_mask = end_mask << 1 | 1
ellipsis_mask <<= 1
new_axis_mask <<= 1
shrink_axis_mask <<= 1
return wrap(
array_ops.strided_slice(
inp,
begin,
end,
strides,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask), True)
@RegisterPFor("StridedSliceGrad")
def _convert_strided_slice_grad(pfor_input):
shape = pfor_input.unstacked_input(0)
begin = pfor_input.unstacked_input(1)
end = pfor_input.unstacked_input(2)
strides = pfor_input.unstacked_input(3)
dy = pfor_input.stacked_input(4)
begin_mask = pfor_input.get_attr("begin_mask")
end_mask = pfor_input.get_attr("end_mask")
ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
new_axis_mask = pfor_input.get_attr("new_axis_mask")
shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")
shape = array_ops.concat(
[math_ops.cast(pfor_input.pfor.loop_len_vector, shape.dtype), shape],
axis=0)
begin = array_ops.concat([[0], begin], axis=0)
end = array_ops.concat([[0], end], axis=0)
strides = array_ops.concat([[1], strides], axis=0)
begin_mask = begin_mask << 1 | 1
end_mask = end_mask << 1 | 1
ellipsis_mask <<= 1
new_axis_mask <<= 1
shrink_axis_mask <<= 1
return wrap(
array_ops.strided_slice_grad(
shape,
begin,
end,
strides,
dy,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask), True)
@RegisterPFor("CheckNumerics")
def _convert_check_numerics(pfor_input):
t = pfor_input.stacked_input(0)
message = pfor_input.get_attr("message")
return wrap(gen_array_ops.check_numerics(t, message), True)
@RegisterPFor("EnsureShape")
def _convert_ensure_shape(pfor_input):
t = pfor_input.stacked_input(0)
shape = tensor_shape.TensorShape(pfor_input.get_attr("shape"))
return wrap(gen_array_ops.ensure_shape(t, [None] + shape), True)
# manip_ops
@RegisterPFor("Roll")
def _convert_roll(pfor_input):
t = pfor_input.stacked_input(0)
shift, shift_stacked, _ = pfor_input.input(1)
axis = pfor_input.unstacked_input(2)
if not shift_stacked:
return wrap(manip_ops.roll(t, shift, axis + 1), True)
else:
# `axis` and `shift` may both be vectors, with repeated axes summing the
# corresponding `shift`s. We scatter shifts into a dense array of shape
# [loop_len, num_unstacked_axes] indicating the offset for each axis.
num_unstacked_axes = math_ops.cast(array_ops.rank(t), dtypes.int64) - 1
axis = math_ops.cast(array_ops.reshape(axis, [-1]), dtypes.int64)
loop_len = math_ops.cast(pfor_input.pfor.loop_len_vector[0], dtypes.int64)
shift = math_ops.cast(array_ops.reshape(shift, [loop_len, -1]),
dtypes.int64)
axis_segment_ids = (
math_ops.range(loop_len, dtype=dtypes.int64)[:, None]
* num_unstacked_axes + axis[None, :])
axis_offsets = array_ops.reshape(
math_ops.unsorted_segment_sum(
data=shift, segment_ids=axis_segment_ids,
num_segments=loop_len * num_unstacked_axes),
[loop_len, num_unstacked_axes])
# Determine the coordinates in the input array of each result and gather
# them.
unstacked_shape = array_ops.shape(t, out_type=dtypes.int64)[1:]
cumsize = math_ops.cumprod(unstacked_shape, exclusive=True, reverse=True)
num_unstacked_elements = math_ops.reduce_prod(unstacked_shape)
result_coordinates = (
(math_ops.range(num_unstacked_elements,
dtype=dtypes.int64)[None, :, None]
// cumsize[None, None, :] - axis_offsets[:, None, :])
% unstacked_shape[None, None, :])
result_flat = array_ops.gather_nd(params=t, indices=result_coordinates,
batch_dims=1)
return wrap(array_ops.reshape(result_flat, array_ops.shape(t)),
True)
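# Illustrative sketch (hypothetical, not a registered converter): the
# gather-based roll above with a single axis and concrete values. Output
# position j of iteration i reads input position (j - shift_i) mod size.
def _example_stacked_roll():
  t = constant_op.constant([[1, 2, 3], [4, 5, 6]])              # [n=2, 3]
  shift = constant_op.constant([[1], [2]], dtype=dtypes.int64)  # per-iteration
  coords = (math_ops.range(3, dtype=dtypes.int64)[None, :, None]
            - shift[:, None, :]) % 3
  # Iteration 0 gathers positions [2, 0, 1] -> [3, 1, 2] (roll by 1);
  # iteration 1 gathers positions [1, 2, 0] -> [5, 6, 4] (roll by 2).
  return array_ops.gather_nd(t, coords, batch_dims=1)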
# math_ops
@RegisterPFor("MatMul")
def _convert_matmul(pfor_input):
# TODO(agarwal): Check if tiling is faster than two transposes.
a, a_stacked, _ = pfor_input.input(0)
b, b_stacked, _ = pfor_input.input(1)
tr_a = pfor_input.get_attr("transpose_a")
tr_b = pfor_input.get_attr("transpose_b")
if a_stacked and b_stacked:
output = wrap(math_ops.matmul(a, b, adjoint_a=tr_a, adjoint_b=tr_b), True)
return output
elif a_stacked:
if tr_a:
a = array_ops.transpose(a, [0, 2, 1])
if a.shape.is_fully_defined():
x, y, z = a.shape
else:
x, y, z = [
array_ops.reshape(i, [])
for i in array_ops.split(array_ops.shape(a), 3)
]
a = array_ops.reshape(a, [x * y, z])
prod = math_ops.matmul(a, b, transpose_b=tr_b)
return wrap(array_ops.reshape(prod, [x, y, -1]), True)
else:
assert b_stacked
if tr_b:
perm = [2, 0, 1]
b = array_ops.transpose(b, perm)
else:
# As an optimization, if one of the first two dimensions is 1, then we can
# reshape instead of transpose.
# TODO(agarwal): This check can be done inside Transpose kernel.
b_shape = array_ops.shape(b)
min_dim = math_ops.minimum(b_shape[0], b_shape[1])
perm = array_ops.where(
math_ops.equal(min_dim, 1), [0, 1, 2], [1, 0, 2])
new_shape = array_ops_stack.stack([b_shape[1], b_shape[0], b_shape[2]])
b = array_ops.transpose(b, perm)
b = array_ops.reshape(b, new_shape)
if b.shape.is_fully_defined():
x, y, z = b.shape
else:
x, y, z = [
array_ops.reshape(i, [])
for i in array_ops.split(array_ops.shape(b), 3)
]
b = array_ops.reshape(b, [x, y * z])
prod = math_ops.matmul(a, b, transpose_a=tr_a)
prod = array_ops.reshape(prod, [-1, y, z])
prod = array_ops.transpose(prod, [1, 0, 2])
return wrap(prod, True)
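# Shape walkthrough (illustrative sketch, not a registered converter): the
# a_stacked branch above flattens the loop and row dimensions so a single
# rank-2 MatMul covers all pfor iterations, then restores the loop dimension.
def _example_stacked_lhs_matmul():
  a = array_ops.ones([4, 2, 3])  # stacked LHS: n = 4 matrices of shape [2, 3].
  b = array_ops.ones([3, 5])     # loop-invariant RHS.
  prod = math_ops.matmul(array_ops.reshape(a, [4 * 2, 3]), b)  # [8, 5]
  return array_ops.reshape(prod, [4, 2, 5])  # one [2, 5] product per iteration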
# TODO(rmlarsen): Use the converter of BatchMatMulV2 once compatibility window
# is met.
@RegisterPFor("BatchMatMul")
def _convert_batch_mat_mul(pfor_input):
# TODO(agarwal): There may be a more efficient way to do this instead of
# stacking the inputs.
pfor_input.stack_inputs()
x = pfor_input.stacked_input(0)
y = pfor_input.stacked_input(1)
adj_x = pfor_input.get_attr("adj_x")
adj_y = pfor_input.get_attr("adj_y")
x = _flatten_first_two_dims(x)
y = _flatten_first_two_dims(y)
output = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
output = _unflatten_first_dim(output, pfor_input.pfor.loop_len_vector)
return wrap(output, True)
@RegisterPFor("BatchMatMulV2")
def _convert_batch_mat_mul_v2(pfor_input):
pfor_input.expanddim_inputs_for_broadcast()
x = pfor_input.input(0)[0]
y = pfor_input.input(1)[0]
adj_x = pfor_input.get_attr("adj_x")
adj_y = pfor_input.get_attr("adj_y")
output = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
return wrap(output, True)
@RegisterPForWithArgs("Sum", math_ops.reduce_sum)
@RegisterPForWithArgs("Prod", math_ops.reduce_prod)
@RegisterPForWithArgs("Max", math_ops.reduce_max)
@RegisterPForWithArgs("Min", math_ops.reduce_min)
@RegisterPForWithArgs("Mean", math_ops.reduce_mean)
@RegisterPForWithArgs("All", math_ops.reduce_all)
@RegisterPForWithArgs("Any", math_ops.reduce_any)
def _convert_reduction(pfor_input, _, op_func):
t = pfor_input.stacked_input(0)
indices = pfor_input.unstacked_input(1)
# Shift positive indices by one to account for the extra dimension.
indices += math_ops.cast(indices >= 0, indices.dtype)
keep_dims = pfor_input.get_attr("keep_dims")
return wrap(op_func(t, indices, keepdims=keep_dims), True)
@RegisterPForWithArgs("ArgMax", math_ops.argmax)
@RegisterPForWithArgs("ArgMin", math_ops.argmin)
def _convert_argmax_argmin(pfor_input, _, op_func):
t = pfor_input.stacked_input(0)
dimension = pfor_input.unstacked_input(1)
dimension += math_ops.cast(dimension >= 0, dimension.dtype)
output_type = pfor_input.get_attr("output_type")
return wrap(op_func(t, axis=dimension, output_type=output_type), True)
@RegisterPFor("Bucketize")
def _convert_bucketize(pfor_input):
t = pfor_input.stacked_input(0)
boundaries = pfor_input.get_attr("boundaries")
return wrap(math_ops.bucketize(t, boundaries), True)
@RegisterPFor("ClipByValue")
def _convert_clip_by_value(pfor_input):
t = pfor_input.stacked_input(0)
clip_value_min = pfor_input.unstacked_input(1)
clip_value_max = pfor_input.unstacked_input(2)
return wrap(gen_math_ops._clip_by_value(t, clip_value_min, clip_value_max),
True)
@RegisterPForWithArgs("Cumsum", math_ops.cumsum)
@RegisterPForWithArgs("Cumprod", math_ops.cumprod)
def _convert_cumfoo(pfor_input, _, op_func):
t = pfor_input.stacked_input(0)
axis = pfor_input.unstacked_input(1)
# Shift positive indices by one to account for the extra dimension.
axis += math_ops.cast(axis >= 0, axis.dtype)
exclusive = pfor_input.get_attr("exclusive")
reverse = pfor_input.get_attr("reverse")
return wrap(op_func(t, axis, exclusive=exclusive, reverse=reverse), True)
@RegisterPFor("BiasAdd")
def _convert_biasadd(pfor_input):
t, t_stacked, _ = pfor_input.input(0)
bias, bias_stacked, _ = pfor_input.input(1)
data_format = pfor_input.get_attr("data_format").decode()
if bias_stacked:
# BiasAdd only supports 1-D biases, so cast bias to match value and use Add.
pfor_input.expanddim_inputs_for_broadcast()
t, _, _ = pfor_input.input(0)
bias = math_ops.cast(pfor_input.stacked_input(1), t.dtype)
if compat.as_bytes(data_format) == b"NCHW":
b_shape = array_ops.shape(bias)
new_b_shape = array_ops.concat(
[b_shape[:-3], b_shape[-1:], b_shape[-3:-1]], axis=0)
bias = array_ops.reshape(bias, new_b_shape)
return wrap(math_ops.add(t, bias), True)
else:
assert t_stacked, "At least one input to BiasAdd should be loop variant."
if compat.as_bytes(data_format) == b"NCHW":
shape = array_ops.shape(t)
flattened_shape = array_ops.concat([[-1], shape[2:]], axis=0)
t = array_ops.reshape(t, flattened_shape)
t = nn_ops.bias_add(t, bias, data_format="NCHW")
t = array_ops.reshape(t, shape)
return wrap(t, True)
return wrap(nn_ops.bias_add(t, bias, data_format=data_format), True)
@RegisterPForWithArgs("UnsortedSegmentSum", math_ops.unsorted_segment_sum)
@RegisterPForWithArgs("UnsortedSegmentMax", math_ops.unsorted_segment_max)
@RegisterPForWithArgs("UnsortedSegmentMin", math_ops.unsorted_segment_min)
@RegisterPForWithArgs("UnsortedSegmentProd", math_ops.unsorted_segment_prod)
def _convert_unsortedsegmentsum(pfor_input, _, op_func):
pfor_input.stack_inputs([0, 1])
data = pfor_input.stacked_input(0)
segment_ids = pfor_input.stacked_input(1)
# TODO(agarwal): handle stacked?
num_segments = pfor_input.unstacked_input(2)
if segment_ids.dtype != num_segments.dtype:
segment_ids = math_ops.cast(segment_ids, dtypes.int64)
num_segments = math_ops.cast(num_segments, dtypes.int64)
dtype = segment_ids.dtype
segment_shape = array_ops.shape(segment_ids, out_type=dtype)
n = segment_shape[0]
ones = array_ops.ones_like(segment_shape, dtype=dtype)[1:]
segment_offset = num_segments * math_ops.range(n, dtype=dtype)
segment_offset = array_ops.reshape(segment_offset,
array_ops.concat([[n], ones], axis=0))
segment_ids += segment_offset
num_segments = math_ops.cast(num_segments, dtypes.int64) * math_ops.cast(
n, dtypes.int64)
output = op_func(data, segment_ids, num_segments)
new_output_shape = array_ops.concat(
[[n, -1], array_ops.shape(output)[1:]], axis=0)
output = array_ops.reshape(output, new_output_shape)
return wrap(output, True)
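# Illustrative sketch (hypothetical, not a registered converter): the
# segment-id offsetting above with concrete values, so per-iteration segments
# never collide inside the single vectorized op.
def _example_offset_segment_sum():
  data = constant_op.constant([[1., 2.], [3., 4.]])     # [n=2, 2]
  segment_ids = constant_op.constant([[0, 0], [1, 0]])  # per-iteration ids
  # Row i is offset by i * num_segments, so ids become [[0, 0], [3, 2]] and the
  # op writes into 2 * 2 disjoint segments, producing [3., 0., 4., 3.].
  offset_ids = segment_ids + 2 * math_ops.range(2)[:, None]
  flat = math_ops.unsorted_segment_sum(data, offset_ids, 4)
  # Reshaped to [n, num_segments] this is [[3., 0.], [4., 3.]], matching a
  # sequential per-iteration UnsortedSegmentSum with num_segments=2.
  return array_ops.reshape(flat, [2, 2])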
def _flatten_array_with_offset(ids, offset_delta, num_rows):
"""Flattens a rank 2 tensor, adding an offset to each row."""
# Note that if `ids` is rank 1, it is broadcast to rank 2.
offset_delta = math_ops.cast(offset_delta, ids.dtype)
n = math_ops.cast(num_rows, dtype=ids.dtype)
offsets = math_ops.range(
start=0, limit=n * offset_delta, delta=offset_delta, dtype=ids.dtype)
offsets = array_ops.expand_dims(offsets, -1)
ids += offsets
return array_ops.reshape(ids, [-1])
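# Illustrative sketch (hypothetical, not used by any converter): with
# offset_delta=3 and num_rows=2, rows receive offsets [0, 3], so
# [[0, 2], [1, 0]] flattens to [0, 2, 4, 3]. This is how per-iteration indices
# into a length-3 dimension are remapped into one flattened dimension of
# length 2 * 3.
def _example_flatten_with_offset():
  ids = constant_op.constant([[0, 2], [1, 0]])
  return _flatten_array_with_offset(ids, offset_delta=3, num_rows=2)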
@RegisterPForWithArgs("SparseSegmentSum", math_ops.sparse_segment_sum_v2)
@RegisterPForWithArgs("SparseSegmentMean", math_ops.sparse_segment_mean_v2)
@RegisterPForWithArgs("SparseSegmentSqrtN", math_ops.sparse_segment_sqrt_n_v2)
@RegisterPForWithArgs("SparseSegmentSumWithNumSegments",
math_ops.sparse_segment_sum_v2)
@RegisterPForWithArgs("SparseSegmentMeanWithNumSegments",
math_ops.sparse_segment_mean_v2)
@RegisterPForWithArgs("SparseSegmentSqrtNWithNumSegments",
math_ops.sparse_segment_sqrt_n_v2)
def _convert_sparse_segment(pfor_input, _, op_func):
_, segment_ids_stacked, _ = pfor_input.input(2)
if segment_ids_stacked:
pfor_input.stack_inputs([1])
data, data_stacked, _ = pfor_input.input(0)
indices, _, _ = pfor_input.input(1)
num_inputs = len(pfor_input.inputs)
assert num_inputs in (3, 4)
if num_inputs == 3:
# `segment_ids` needs to be unstacked since otherwise output sizes could
# differ across pfor iterations.
segment_ids = pfor_input.unstacked_input(2)
num_segments = nn_ops.relu(math_ops.reduce_max(segment_ids) + 1)
else:
segment_ids, _, _ = pfor_input.input(2)
num_segments = pfor_input.unstacked_input(3)
n = pfor_input.pfor.loop_len_vector[0]
if data_stacked:
indices = _flatten_array_with_offset(indices, array_ops.shape(data)[1], n)
data = _flatten_first_two_dims(data)
else:
indices = array_ops.reshape(indices, [-1])
segment_ids = _flatten_array_with_offset(segment_ids, num_segments, n)
if num_inputs == 3:
num_segments = None
else:
num_segments *= n
output = op_func(data, indices, segment_ids, num_segments=num_segments)
output = _unflatten_first_dim(output, [n])
return wrap(output, True)
@RegisterPForWithArgs("SparseSegmentSumGrad", math_ops.sparse_segment_sum_grad)
@RegisterPForWithArgs("SparseSegmentMeanGrad",
math_ops.sparse_segment_mean_grad)
@RegisterPForWithArgs("SparseSegmentSqrtNGrad",
math_ops.sparse_segment_sqrt_n_grad)
def _convert_sparse_segment_grad(pfor_input, _, op_func):
grad = pfor_input.stacked_input(0)
indices = pfor_input.unstacked_input(1)
segment_ids = pfor_input.unstacked_input(2)
dim0 = pfor_input.unstacked_input(3)
n = pfor_input.pfor.loop_len_vector[0]
indices = _flatten_array_with_offset(indices, dim0, n)
num_segments = nn_ops.relu(math_ops.reduce_max(segment_ids) + 1)
segment_ids = _flatten_array_with_offset(segment_ids, num_segments, n)
grad = _flatten_first_two_dims(grad)
dim0 *= n
output = op_func(grad, indices, segment_ids, dim0)
output = _unflatten_first_dim(output, [n])
return wrap(output, True)
@RegisterPFor("Cast")
def _convert_cast(pfor_input):
inp = pfor_input.stacked_input(0)
dtype = pfor_input.get_attr("DstT")
return wrap(math_ops.cast(inp, dtype), True)
@RegisterPFor("Abs")
@RegisterPFor("Acos")
@RegisterPFor("Acosh")
@RegisterPFor("Add")
@RegisterPFor("AddV2")
@RegisterPFor("Angle")
@RegisterPFor("Asin")
@RegisterPFor("Asinh")
@RegisterPFor("Atan")
@RegisterPFor("Atan2")
@RegisterPFor("Atanh")
@RegisterPFor("BesselI0")
@RegisterPFor("BesselI1")
@RegisterPFor("BesselI0e")
@RegisterPFor("BesselI1e")
@RegisterPFor("BesselK0")
@RegisterPFor("BesselK1")
@RegisterPFor("BesselK0e")
@RegisterPFor("BesselK1e")
@RegisterPFor("BesselJ0")
@RegisterPFor("BesselJ1")
@RegisterPFor("BesselY0")
@RegisterPFor("BesselY1")
@RegisterPFor("BitwiseAnd")
@RegisterPFor("BitwiseOr")
@RegisterPFor("BitwiseXor")
@RegisterPFor("Ceil")
@RegisterPFor("Complex")
@RegisterPFor("ComplexAbs")
@RegisterPFor("Conj")
@RegisterPFor("Cos")
@RegisterPFor("Cosh")
@RegisterPFor("Dawsn")
@RegisterPFor("Digamma")
@RegisterPFor("Div")
@RegisterPFor("DivNoNan")
@RegisterPFor("Elu")
@RegisterPFor("Erf")
@RegisterPFor("Erfc")
@RegisterPFor("Erfinv")
@RegisterPFor("Exp")
@RegisterPFor("Expint")
@RegisterPFor("Expm1")
@RegisterPFor("Floor")
@RegisterPFor("FloorDiv")
@RegisterPFor("FloorMod")
@RegisterPFor("FresnelCos")
@RegisterPFor("FresnelSin")
@RegisterPFor("Greater")
@RegisterPFor("GreaterEqual")
@RegisterPFor("Igamma")
@RegisterPFor("IgammaGradA")
@RegisterPFor("Igammac")
@RegisterPFor("Imag")
@RegisterPFor("Inv")
@RegisterPFor("Invert")
@RegisterPFor("IsFinite")
@RegisterPFor("IsInf")
@RegisterPFor("IsNan")
@RegisterPFor("LeftShift")
@RegisterPFor("Less")
@RegisterPFor("LessEqual")
@RegisterPFor("Lgamma")
@RegisterPFor("Log")
@RegisterPFor("Log1p")
@RegisterPFor("LogicalAnd")
@RegisterPFor("LogicalNot")
@RegisterPFor("LogicalOr")
@RegisterPFor("LogicalXor")
@RegisterPFor("Maximum")
@RegisterPFor("Minimum")
@RegisterPFor("Mod")
@RegisterPFor("Mul")
@RegisterPFor("MulNoNan")
@RegisterPFor("Ndtri")
@RegisterPFor("Neg")
@RegisterPFor("Polygamma")
@RegisterPFor("Pow")
@RegisterPFor("Real")
@RegisterPFor("RealDiv")
@RegisterPFor("Reciprocal")
@RegisterPFor("Relu")
@RegisterPFor("Relu6")
@RegisterPFor("RightShift")
@RegisterPFor("Rint")
@RegisterPFor("Round")
@RegisterPFor("Rsqrt")
@RegisterPFor("Selu")
@RegisterPFor("Sigmoid")
@RegisterPFor("Sign")
@RegisterPFor("Sin")
@RegisterPFor("Sinh")
@RegisterPFor("Softplus")
@RegisterPFor("Softsign")
@RegisterPFor("Spence")
@RegisterPFor("Sqrt")
@RegisterPFor("Square")
@RegisterPFor("SquaredDifference")
@RegisterPFor("Sub")
@RegisterPFor("Tan")
@RegisterPFor("Tanh")
@RegisterPFor("TruncateDiv")
@RegisterPFor("TruncateMod")
@RegisterPFor("Xdivy")
@RegisterPFor("Xlogy")
@RegisterPFor("Xlog1py")
@RegisterPFor("Zeta")
def _convert_cwise(pfor_input):
if pfor_input.num_inputs > 1:
pfor_input.expanddim_inputs_for_broadcast()
out = _create_op(
pfor_input.op_type, [x.t for x in pfor_input.inputs],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
assert len(out) == 1
out = out[0]
op_output = wrap(out, True)
return op_output
@RegisterPFor("XlaSharding")
def _convert_xla_sharding(pfor_input):
t = pfor_input.stacked_input(0)
sharding = pfor_input.get_attr("sharding")
return wrap(xla.sharding(t, sharding=sharding), True)
@RegisterPFor("LeakyRelu")
def _convert_leaky_relu(pfor_input):
t = pfor_input.stacked_input(0)
alpha = pfor_input.get_attr("alpha")
return wrap(gen_nn_ops.leaky_relu(t, alpha=alpha), True)
@RegisterPFor("Equal")
def _convert_equal(pfor_input):
pfor_input.expanddim_inputs_for_broadcast()
x = pfor_input.input(0)[0]
y = pfor_input.input(1)[0]
incompatible_shape_error = pfor_input.get_attr("incompatible_shape_error")
return wrap(gen_math_ops.equal(
x, y, incompatible_shape_error=incompatible_shape_error), True)
@RegisterPFor("NotEqual")
def _convert_not_equal(pfor_input):
pfor_input.expanddim_inputs_for_broadcast()
x = pfor_input.input(0)[0]
y = pfor_input.input(1)[0]
incompatible_shape_error = pfor_input.get_attr("incompatible_shape_error")
return wrap(gen_math_ops.not_equal(
x, y, incompatible_shape_error=incompatible_shape_error), True)
@RegisterPFor("ApproximateEqual")
def _convert_approximate_equal(pfor_input):
pfor_input.expanddim_inputs_for_broadcast()
x = pfor_input.input(0)[0]
y = pfor_input.input(1)[0]
tolerance = pfor_input.get_attr("tolerance")
return wrap(math_ops.approximate_equal(x, y, tolerance=tolerance), True)
@RegisterPFor("Shape")
def _convert_shape(pfor_input):
out_type = pfor_input.get_attr("out_type")
return wrap(
array_ops.shape(pfor_input.stacked_input(0), out_type=out_type)[1:],
False)
@RegisterPFor("ShapeN")
def _convert_shape_n(pfor_input):
out_type = pfor_input.get_attr("out_type")
shapes = [
array_ops.shape(x, out_type=out_type)[1:] if stacked else array_ops.shape(
x, out_type=out_type) for x, stacked, _ in pfor_input.inputs
]
return [wrap(x, False) for x in shapes]
@RegisterPFor("Size")
def _convert_size(pfor_input):
out_type = pfor_input.get_attr("out_type")
n = math_ops.cast(pfor_input.pfor.loop_len_vector[0], out_type)
return wrap(
array_ops.size(pfor_input.stacked_input(0), out_type=out_type) // n,
False)
@RegisterPFor("Rank")
def _convert_rank(pfor_input):
return wrap(array_ops.rank(pfor_input.stacked_input(0)) - 1, False)
@RegisterPFor("AddN")
def _convert_addn(pfor_input):
# AddN does not support broadcasting.
pfor_input.stack_inputs(tile_variants=False)
return _wrap_and_tile_variants(
math_ops.add_n([x.t for x in pfor_input.inputs]),
pfor_input.pfor.loop_len_vector)
@RegisterPFor("Cross")
def _convert_cross(pfor_input):
pfor_input.stack_inputs()
a = pfor_input.stacked_input(0)
b = pfor_input.stacked_input(1)
return wrap(math_ops.cross(a, b), True)
@RegisterPFor("BiasAddGrad")
def _convert_biasaddgrad(pfor_input):
grad = pfor_input.stacked_input(0)
fmt = pfor_input.get_attr("data_format")
if fmt == b"NCHW":
output = math_ops.reduce_sum(grad, axis=[1, 3, 4], keepdims=False)
else:
grad_shape = array_ops.shape(grad)
last_dim_shape = grad_shape[-1]
first_dim_shape = grad_shape[0]
output = array_ops.reshape(grad, [first_dim_shape, -1, last_dim_shape])
output = math_ops.reduce_sum(output, axis=[1], keepdims=False)
return wrap(output, True)
# Some required ops are not exposed under the tf namespace. Hence relying on
# _create_op to create them.
@RegisterPForWithArgs("EluGrad")
@RegisterPForWithArgs("LeakyReluGrad")
@RegisterPForWithArgs("ReciprocalGrad")
@RegisterPForWithArgs("Relu6Grad")
@RegisterPForWithArgs("ReluGrad")
@RegisterPForWithArgs("RsqrtGrad")
@RegisterPForWithArgs("SeluGrad")
@RegisterPForWithArgs("SigmoidGrad")
@RegisterPForWithArgs("SoftplusGrad")
@RegisterPForWithArgs("SoftsignGrad")
@RegisterPForWithArgs("SqrtGrad")
@RegisterPForWithArgs("TanhGrad")
def _convert_grads(pfor_input, op_type, *args, **kw_args):
del args
del kw_args
# TODO(agarwal): Looks like these ops don't support broadcasting. Hence we
# have to use tiling here.
pfor_input.stack_inputs()
outputs = _create_op(
op_type, [x.t for x in pfor_input.inputs],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
return [wrap(x, True) for x in outputs]
@RegisterPFor("Select")
def _convert_select(pfor_input):
pfor_input.stack_inputs()
cond = pfor_input.stacked_input(0)
t = pfor_input.stacked_input(1)
e = pfor_input.stacked_input(2)
cond_rank = array_ops.rank(cond)
cond, t, e = smart_cond.smart_cond(
cond_rank > 1, lambda: _inputs_with_flattening(pfor_input, [0, 1, 2]),
lambda: [cond, t, e])
outputs = _create_op(
pfor_input.op_type, [cond, t, e], [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
n = pfor_input.pfor.loop_len_vector
out = smart_cond.smart_cond(cond_rank > 1,
lambda: _unflatten_first_dim(outputs[0], n),
lambda: outputs[0])
  return [wrap(out, True) for _ in outputs]
@RegisterPFor("SelectV2")
def _convert_selectv2(pfor_input):
pfor_input.expanddim_inputs_for_broadcast()
cond = pfor_input.input(0)[0]
t = pfor_input.input(1)[0]
e = pfor_input.input(2)[0]
out = array_ops.where_v2(cond, t, e)
return wrap(out, True)
# random_ops
def _transpose_dim_to_front(x, dim):
rank = array_ops.rank(x)
return array_ops.transpose(
x,
perm=array_ops.concat(
[[dim], math_ops.range(0, dim),
math_ops.range(dim + 1, rank)],
axis=0))
@RegisterPForWithArgs("RandomUniform")
@RegisterPForWithArgs("RandomUniformInt")
@RegisterPForWithArgs("RandomStandardNormal")
@RegisterPForWithArgs("TruncatedNormal")
def _convert_random(pfor_input, op_type, *args, **kw_args):
del args
del kw_args
inputs = [pfor_input.unstacked_input(i) for i in range(pfor_input.num_inputs)]
# inputs[0] is "shape"
inputs[0] = array_ops.concat([pfor_input.pfor.loop_len_vector, inputs[0]],
axis=0)
# TODO(b/222761732): Turn this warning back on when legacy RNGs are
# deprecated.
# logging.warning(
# "Note that %s inside pfor op may not give same output as "
# "inside a sequential loop.", op_type)
outputs = _create_op(
op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
return [wrap(x, True) for x in outputs]
@RegisterPFor("RandomGamma")
@RegisterPFor("RandomPoissonV2")
def _convert_random_with_param(pfor_input):
shape = pfor_input.unstacked_input(0)
# param is lam (Poisson rate) or alpha (Gamma shape).
param, param_stacked, _ = pfor_input.input(1)
# TODO(b/222761732): Turn this warning back on when legacy RNGs are
# deprecated.
# logging.warning(
# "Note that %s inside pfor op may not give same output as "
# "inside a sequential loop.", pfor_input.op_type)
if param_stacked:
samples = _create_op(
pfor_input.op_type,
inputs=[shape, param],
op_dtypes=[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs[0]
loop_dim = array_ops.shape(shape)[0]
stacked_samples = _transpose_dim_to_front(samples, loop_dim)
else:
shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0)
stacked_samples = _create_op(
pfor_input.op_type,
inputs=[shape, param],
op_dtypes=[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs[0]
return wrap(stacked_samples, True)
@RegisterPFor("Multinomial")
def _convert_multinomial(pfor_input):
logits, logits_stacked, _ = pfor_input.input(0)
num_samples = pfor_input.unstacked_input(1)
seed = pfor_input.get_attr("seed")
seed2 = pfor_input.get_attr("seed2")
output_dtype = pfor_input.get_attr("output_dtype")
# TODO(b/222761732): Turn this warning back on when legacy RNGs are
# deprecated.
# logging.warning(
# "Note that Multinomial inside pfor op may not give same output as "
# "inside a sequential loop.")
n = pfor_input.pfor.loop_len_vector[0]
if logits_stacked:
flattened_logits = _flatten_first_two_dims(logits)
samples = gen_random_ops.multinomial(
flattened_logits,
num_samples,
seed=seed,
seed2=seed2,
output_dtype=output_dtype)
stacked_samples = _unflatten_first_dim(samples, [n])
else:
samples = gen_random_ops.multinomial(
logits,
num_samples * n,
seed=seed,
seed2=seed2,
output_dtype=output_dtype)
stacked_samples = array_ops.transpose(
array_ops.reshape(samples, [-1, n, num_samples]), [1, 0, 2])
return wrap(stacked_samples, True)
@RegisterPFor("StatelessMultinomial")
@RegisterPFor("StatelessParameterizedTruncatedNormal")
@RegisterPFor("StatelessRandomBinomial")
@RegisterPFor("StatelessRandomGammaV2")
@RegisterPFor("StatelessRandomNormal")
@RegisterPFor("StatelessRandomPoisson")
@RegisterPFor("StatelessRandomUniform")
@RegisterPFor("StatelessRandomUniformInt")
@RegisterPFor("StatelessRandomUniformFullInt")
@RegisterPFor("StatelessTruncatedNormal")
def _convert_stateless_multinomial(pfor_input):
  # Unlike stateful random ops, stateless ones should be reproducible given the
  # seed. Hence we don't want to use the strategy applied to stateful ops,
  # where vectorization may generate a different set of random numbers.
  # Unfortunately, the kernels are currently not necessarily set up to do this
  # efficiently, so we fall back to a sequential loop for vectorization.
return _fallback_converter(pfor_input, warn=False)
# linalg_ops
@RegisterPForWithArgs("XlaEinsum")
@RegisterPForWithArgs("Einsum")
def _convert_einsum(pfor_input, op_type):
# Einsum may have either 1 or 2 inputs.
inputs, input_stacked, _ = zip(*[
pfor_input.input(i)
for i in range(pfor_input.num_inputs)])
# Parse the einsum equation.
equation = pfor_input.get_attr("equation").decode("utf-8")
input_expr, output_expr = equation.split("->")
input_exprs = input_expr.split(",")
# Pick a placeholder symbol to use for the new axis.
  chosen_symbol = None
  for s in string.ascii_letters:
    if s not in equation:
      chosen_symbol = s
      break
if chosen_symbol is None:
raise ValueError("Could not figure out what symbol to use for new axis.")
assert any(input_stacked)
for i in range(len(inputs)):
if input_stacked[i]:
input_exprs[i] = "{}{}".format(chosen_symbol, input_exprs[i])
output_expr = "{}{}".format(chosen_symbol, output_expr)
new_equation = "{}->{}".format(",".join(input_exprs), output_expr)
if op_type == "XlaEinsum":
if len(inputs) == 1:
result = xla.einsum(equation=new_equation, a=inputs[0])
else:
result = xla.einsum(equation=new_equation, a=inputs[0], b=inputs[1])
else:
assert op_type == "Einsum"
result = special_math_ops.einsum(new_equation, *inputs)
return wrap(result, True)
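# Illustrative sketch (hypothetical, not a registered converter): the equation
# rewrite above for two stacked operands of "ij,jk->ik". The first letter not
# already present in the equation ("a" here) labels the loop dimension.
def _example_vectorized_einsum():
  a = array_ops.ones([4, 2, 3])  # stacked operand; loop dimension of size 4.
  b = array_ops.ones([4, 3, 5])  # stacked operand.
  return special_math_ops.einsum("aij,ajk->aik", a, b)  # shape [4, 2, 5]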
@RegisterPFor("Cholesky")
def _convert_cholesky(pfor_input):
t = pfor_input.stacked_input(0)
return wrap(linalg_ops.cholesky(t), True)
@RegisterPFor("LogMatrixDeterminant")
def _convert_log_matrix_determinant(pfor_input):
t = pfor_input.stacked_input(0)
return [wrap(x, True) for x in linalg_ops.log_matrix_determinant(t)]
@RegisterPFor("MatrixInverse")
def _convert_matrix_inverse(pfor_input):
t = pfor_input.stacked_input(0)
adjoint = pfor_input.get_attr("adjoint")
return wrap(gen_linalg_ops.matrix_inverse(t, adjoint=adjoint), True)
@RegisterPFor("MatrixSolve")
def _convert_matrix_solve(pfor_input):
pfor_input.stack_inputs()
matrix = pfor_input.stacked_input(0)
rhs = pfor_input.stacked_input(1)
adjoint = pfor_input.get_attr("adjoint")
output = gen_linalg_ops.matrix_solve(
matrix, rhs, adjoint=adjoint)
return wrap(output, True)
@RegisterPFor("MatrixTriangularSolve")
def _convert_matrix_triangular_solve(pfor_input):
pfor_input.expanddim_inputs_for_broadcast()
matrix = pfor_input.input(0)[0]
rhs = pfor_input.input(1)[0]
lower = pfor_input.get_attr("lower")
adjoint = pfor_input.get_attr("adjoint")
output = linalg_ops.matrix_triangular_solve(
matrix, rhs, lower=lower, adjoint=adjoint)
return wrap(output, True)
@RegisterPFor("SelfAdjointEigV2")
def _convert_self_adjoint_eig(pfor_input):
t = pfor_input.stacked_input(0)
compute_v = pfor_input.get_attr("compute_v")
e, v = gen_linalg_ops.self_adjoint_eig_v2(t, compute_v=compute_v)
# If compute_v is False, v will have shape [0].
return wrap(e, True), wrap(v, compute_v)
# logging_ops
@RegisterPFor("Assert")
def _convert_assert(pfor_input):
cond, cond_stacked, _ = pfor_input.input(0)
if cond_stacked:
cond = math_ops.reduce_all(cond)
data_list = [x.t for x in pfor_input.inputs][1:]
return _create_op(
"Assert", [cond] + data_list, [], attrs=pfor_input.op.node_def.attr)
@RegisterPFor("Print")
def _convert_print(pfor_input):
# Note that we don't stack all the inputs. Hence unstacked values are printed
# once here vs multiple times in a while_loop.
pfor_input.stack_inputs([0])
outputs = _create_op(
"Print", [x.t for x in pfor_input.inputs],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
return [wrap(x, True) for x in outputs]
@RegisterPFor("PrintV2")
def _convert_print_v2(pfor_input):
# Print the full input Tensor(s), including the batch dimension if stacked.
return _create_op(
"PrintV2", [x.t for x in pfor_input.inputs],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr)
@RegisterPFor("StringFormat")
def _convert_string_format(pfor_input):
# Format using the full input Tensor(s), including the batch dimension if
# stacked.
op = _create_op(
"StringFormat", [x.t for x in pfor_input.inputs],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr)
return [wrap(output, False) for output in op.outputs]
# data_flow_ops
# TensorArray conversion is tricky since we don't support arrays of
# TensorArrays. For converting them, we consider two distinct cases:
#
# 1. The array is constructed outside the pfor call, and read/written inside the
# loop.
# This is an easier case since we don't need to make an array of TensorArrays.
# A correctness requirement is that the parallel iterations must not write to
# the same location. Hence, at conversion time we disallow loop-invariant write
# indices, since those would guarantee a collision. Loop-variant indices may
# still collide, but that surfaces as a runtime error.
#
# 2. The array is constructed and used entirely inside each pfor iteration.
# For simplicity, here we require that the indices used for write/scatter are
# "unstacked". Otherwise it becomes hard to merge the TensorArrays created in
# different pfor iterations. We consider two sub-cases:
#
# 2a Elements written to the array are "stacked"
# To simulate multiple TensorArrays, we may increase the dimension of each
# element of the array. i.e. the i_th row of the j_th entry of the converted
# TensorArray corresponds to the j_th entry of the TensorArray in the i_th
# pfor iteration.
#
# 2b Elements written to the array are "unstacked"
# In this case we don't increase the dimensions to avoid redundant tiling. Each
# iteration is trying to write the same value. So we convert that to a single
# write.
#
# Here are some tricks used to implement the above:
# - TensorArrayV3 constructor encodes the element shape as an attr. Instead of
# trying to trace whether future writes are stacked or unstacked in order to set
# this attr, we set it to correspond to unknown shape.
# - We use the "flow" output of the different ops to track whether the array
# elements are stacked or unstacked. If a stacked write/scatter is done, we make
# the flow stacked as well.
# - We use some heuristic traversal of the graph to track whether the
# TensorArray handle was created inside or outside the pfor loop.
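# Illustrative user-level example of the two cases (a sketch assuming the
# public tf.TensorArray / vectorization APIs; not part of this module):
#
#   ta = tf.TensorArray(tf.float32, size=3).unstack(tf.constant([1., 2., 3.]))
#   def loop_fn_case1(i):
#     return ta.read(i)        # case 1: handle created outside the loop; each
#                              # iteration must touch a distinct index.
#
#   def loop_fn_case2(i):
#     local = tf.TensorArray(tf.float32, size=1)
#     local = local.write(0, tf.cast(i, tf.float32))  # case 2a: stacked write,
#     return local.stack()     # handled by adding a dimension to each element.
#
# Either loop body could then be vectorized with something like
# tf.vectorized_map over the loop indices.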
@RegisterPFor("TensorArrayV3")
def _convert_tensor_array_v3(pfor_input):
size = pfor_input.unstacked_input(0)
dtype = pfor_input.get_attr("dtype")
dynamic_size = pfor_input.get_attr("dynamic_size")
clear_after_read = pfor_input.get_attr("clear_after_read")
identical_element_shapes = pfor_input.get_attr("identical_element_shapes")
tensor_array_name = pfor_input.get_attr("tensor_array_name")
handle, flow = data_flow_ops.tensor_array_v3(
size,
dtype=dtype,
# We don't set element shape since we don't know if writes are stacked or
# not yet.
element_shape=None,
dynamic_size=dynamic_size,
clear_after_read=clear_after_read,
identical_element_shapes=identical_element_shapes,
tensor_array_name=tensor_array_name)
# Note we keep flow unstacked for now since we don't know if writes will be
# stacked or not.
return wrap(handle, False), wrap(flow, False)
@RegisterPFor("TensorArraySizeV3")
def _convert_tensor_array_size_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
flow, flow_stacked, _ = pfor_input.input(1)
if flow_stacked:
flow = _unstack_flow(flow)
size = data_flow_ops.tensor_array_size_v3(handle, flow)
return wrap(size, False)
def _handle_inside_pfor(pfor_input, handle):
"""Returns True if handle was created inside the pfor loop."""
# We use some heuristic to find the original TensorArray creation op.
# The logic should handle the common cases (except cond based subgraphs).
# In theory the user could perform different operations on the handle (like
# Reshape, stack multiple handles, etc) which could break this logic.
# TODO(agarwal): handle Switch/Merge.
while handle.op.type in ("Enter", "Identity"):
handle = handle.op.inputs[0]
  if handle.op.type not in [
      "TensorArrayV3", "TensorArrayGradV3", "TensorArrayGradWithShape"
  ]:
    raise ValueError(f"Unable to find source for handle {handle}.")
  return pfor_input.pfor.op_is_inside_loop(handle.op)
def _unstack_flow(value):
  # TODO(agarwal): consider checking whether `value` is a Tile op and, if so,
  # using its input directly. This may avoid running the Tile operation.
return array_ops.gather(value, 0)
@RegisterPFor("TensorArrayReadV3")
def _convert_tensor_array_read_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
index, index_stacked, _ = pfor_input.input(1)
dtype = pfor_input.get_attr("dtype")
flow, flow_stacked, _ = pfor_input.input(2)
if flow_stacked:
flow = _unstack_flow(flow)
is_inside_pfor = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside_pfor:
# Note that if we are inside a control flow construct inside the pfor, and
# only some of the iterations are doing the read (i.e.
# `all_indices_partitioned` is True), then the read operation should only
# return values for the currently active pfor iterations (`all_indices`
# below). Hence, whenever the returned value is stacked (i.e. `flow` is
# stacked), we may need to do an extra gather after reading the values. Also
# note that if `is_inside` is false, then values in the tensor array are
# unstacked. So the check is only needed in this branch.
all_indices = pfor_input.pfor.all_indices
all_indices_partitioned = pfor_input.pfor.all_indices_partitioned
# Note: flow_stacked indicates if values in the TensorArray are stacked or
# not.
if index_stacked:
if flow_stacked:
raise ValueError(
"It looks like TensorArrayReadV3 was called on a TensorArray whose"
" values are not loop-invariant, and the read indices were also"
" not loop invariant. This is currently unsupported.")
value = data_flow_ops.tensor_array_gather_v3(
handle, index, flow, dtype=dtype)
return wrap(value, True)
value = data_flow_ops.tensor_array_read_v3(handle, index, flow, dtype=dtype)
if flow_stacked and all_indices_partitioned:
value = array_ops.gather(value, all_indices)
return wrap(value, flow_stacked)
# Values in the TensorArray should be unstacked (since different iterations
# couldn't write to the same location). So whether output is stacked or not
# depends on index_stacked.
if index_stacked:
value = data_flow_ops.tensor_array_gather_v3(
handle, index, flow, dtype=dtype)
else:
value = data_flow_ops.tensor_array_read_v3(handle, index, flow, dtype=dtype)
return wrap(value, index_stacked)
@RegisterPFor("TensorArrayWriteV3")
def _convert_tensor_array_write_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
index, index_stacked, _ = pfor_input.input(1)
value, value_stacked, _ = pfor_input.input(2)
flow, flow_stacked, _ = pfor_input.input(3)
if value_stacked and pfor_input.pfor.all_indices_partitioned:
    # We appear to be inside a control flow construct within pfor where only
    # some iterations are currently active. We disallow this since it could
    # lead to different array indices holding values of different shapes, which
    # would be hard to merge later.
raise ValueError("Writing non loop invariant values to TensorArray from "
"inside a while_loop/cond not supported.")
if flow_stacked:
flow = _unstack_flow(flow)
is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside:
if index_stacked:
raise ValueError(f"Need indices for {handle} to be loop invariant.")
if not flow_stacked and not value_stacked:
flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow)
return wrap(flow_out, False)
else:
if not value_stacked:
value = _stack(value, pfor_input.pfor.loop_len_vector).t
# TODO(agarwal): Note that if flow is unstacked and value is stacked, then
# this may or may not be a safe situation. flow is unstacked both for a
# freshly created TensorArray, as well as after unstacked values are
# written to it. If it is the latter, then we cannot write a stacked value
# now since that may cause runtime errors due to different shapes in the
# array. At the moment we are not able to handle this gracefully and
# distinguish between the two cases. That would require some heuristic
# traversal of the graph to figure out whether all the writes are
# unstacked or not.
flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
else:
if not index_stacked:
raise ValueError(f"Need indices for {handle} to be not loop invariant.")
# Note that even when index_stacked is true, actual values in index may
# still not be unique. However that will cause runtime error when executing
# the scatter operation below.
if not value_stacked:
value = _stack(value, pfor_input.pfor.loop_len_vector).t
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, index, value, flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
def _transpose_first_two_dims(value):
# TODO(agarwal): optimize if one of the dims == 1.
value_shape = array_ops.shape(value)
v0 = value_shape[0]
v1 = value_shape[1]
value = array_ops.reshape(value, [v0, v1, -1])
value = array_ops.transpose(value, [1, 0, 2])
new_shape = array_ops.concat([[v1, v0], value_shape[2:]], axis=0)
return array_ops.reshape(value, new_shape)
@RegisterPFor("TensorArrayGatherV3")
def _convert_tensor_array_gather_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
indices, indices_stacked, _ = pfor_input.input(1)
indices = array_ops.reshape(indices, [-1])
flow, flow_stacked, _ = pfor_input.input(2)
if flow_stacked:
flow = _unstack_flow(flow)
dtype = pfor_input.get_attr("dtype")
# TODO(agarwal): support element_shape attr?
n = pfor_input.pfor.loop_len_vector
value = data_flow_ops.tensor_array_gather_v3(
handle, indices, flow, dtype=dtype)
is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside:
# flow_stacked indicates if values in the TensorArray are stacked or not.
if indices_stacked:
if flow_stacked:
raise ValueError(
"It looks like TensorArrayGatherV3 was called on a TensorArray "
"whose values are not loop-invariant, and the indices were also "
"not loop invariant. This is currently unsupported.")
else:
value = _unflatten_first_dim(value, n)
return wrap(value, True)
else:
if flow_stacked:
# Since elements in this array are stacked and `value` was produced by
# gather, its first two dims are "gathered elements" and "stack
# dimension". Our semantics require these two to be flipped.
value = _transpose_first_two_dims(value)
return wrap(value, flow_stacked)
else:
# Values in the TensorArray should be unstacked (since different iterations
# couldn't write to the same location). So whether output is stacked or not
# depends on indices_stacked.
if indices_stacked:
value = _unflatten_first_dim(value, n)
return wrap(value, indices_stacked)
@RegisterPFor("TensorArrayScatterV3")
def _convert_tensor_array_scatter_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
indices, indices_stacked, _ = pfor_input.input(1)
indices = array_ops.reshape(indices, [-1])
value, value_stacked, _ = pfor_input.input(2)
flow, flow_stacked, _ = pfor_input.input(3)
if flow_stacked:
flow = _unstack_flow(flow)
is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside:
if indices_stacked:
raise ValueError(f"Need indices for {handle} to be loop invariant.")
# Note that flow_stacked indicates if existing values in the array are
# stacked or not.
if not flow_stacked and not value_stacked:
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
flow)
return wrap(flow_out, False)
if not value_stacked:
# TODO(agarwal): tile in the second dimension directly instead of
# transposing below.
value = _stack(value, pfor_input.pfor.loop_len_vector).t
value = _transpose_first_two_dims(value)
# TODO(agarwal): Note that if a previous write was unstacked, flow will be
# unstacked, and a stacked value may be written here which may cause
# runtime error due to different elements having different shape. We do
# not try to prevent that.
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
if not indices_stacked:
raise ValueError(f"Need indices for {handle} to be not loop invariant.")
if not value_stacked:
value = _stack(value, pfor_input.pfor.loop_len_vector).t
value = _flatten_first_two_dims(value)
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value, flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
@RegisterPFor("TensorArrayGradV3")
def _convert_tensor_array_grad_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
flow, flow_stacked, _ = pfor_input.input(1)
if flow_stacked:
flow = _unstack_flow(flow)
source = pfor_input.get_attr("source")
# TODO(agarwal): For now, we assume that gradients are stacked if the
# TensorArrayGradV3 call is being done inside the pfor. Getting that wrong
  # will give a runtime error due to an incorrect shape being written to the
# accumulator. It is difficult to know in advance if gradients written will be
# stacked or not. Note that flow being stacked is not indicative of the
# gradient being stacked or not. Revisit this later.
shape_to_prepend = pfor_input.pfor.loop_len_vector
grad_handle, flow_out = data_flow_ops.tensor_array_grad_with_shape(
handle=handle,
flow_in=flow,
shape_to_prepend=shape_to_prepend,
source=source)
flow_out = _stack(flow_out, pfor_input.pfor.loop_len_vector).t
return [wrap(grad_handle, False), wrap(flow_out, True)]
def _stack_tensor_list_shape(shape, first_dim):
shape_value = tensor_util.constant_value(shape)
# Note that negative values in the shape are used to signify unknown shapes
# and are handled in a special way.
if shape_value is not None:
shape_value = np.asarray(shape_value)
if -1 in shape_value:
return constant_op.constant(-1)
elif not shape_value.size:
return first_dim
else:
shape = array_ops.reshape(shape, [-1])
return tf_cond.cond(
math_ops.reduce_any(shape < 0),
lambda: constant_op.constant(-1),
lambda: array_ops.concat([first_dim, shape], axis=0))
def _tile_variant_with_length(t, length):
"""stacks `t` `length` times."""
if _is_variant_with_internal_stacking(t):
# The content of TensorLists is vectorized, not the variant itself.
return t
original_tensor = t
t.set_shape([])
t = array_ops.reshape(t, [-1])
with ops.device("CPU:0"):
result = array_ops.tile(t, length)
# TODO(b/169968286): Should regular shape functions do handle data
# propagation here?
handle_data_util.copy_handle_data(original_tensor, result)
return result
def _tile_variant(t, pfor_input):
"""stacks `t` according to its loop context."""
return _tile_variant_with_length(t, pfor_input.pfor.loop_len_vector)
def _untile_variant(t):
if _is_variant_with_internal_stacking(t):
# The content of TensorLists is vectorized, not the variant itself.
if not t.shape.is_compatible_with([]):
raise AssertionError(
("Unexpectedly saw a vectorized variant (e.g. TensorList) with "
f"non-scalar shape: {t!r}"))
return t
return array_ops.gather(t, 0)
@RegisterPFor("OptionalFromValue")
def _convert_optional_from_value(pfor_input):
pfor_input.stack_inputs()
return wrap(
gen_optional_ops.optional_from_value([x.t for x in pfor_input.inputs]),
True,
)
@RegisterPFor("OptionalGetValue")
def _convert_optional_get_value(pfor_input):
handle = pfor_input.stacked_input(0)
output_types = pfor_input.get_attr("output_types")
original_output_shapes = pfor_input.get_attr("output_shapes")
output_shapes = []
for shape in original_output_shapes:
shape = tensor_shape.TensorShape(shape)
loop_len_value = tensor_util.constant_value(pfor_input.pfor.loop_len_vector)
loop_len_shape = tensor_shape.TensorShape(
[loop_len_value[0] if loop_len_value is not None else None]
)
shape = loop_len_shape.concatenate(shape)
output_shapes.append(shape.as_proto())
results = gen_optional_ops.optional_get_value(
handle, output_types, output_shapes
)
return [wrap(t, True) for t in results]
@RegisterPFor("TensorListReserve")
def _convert_tensor_list_reserve(pfor_input):
element_shape = pfor_input.unstacked_input(0)
num_elements = pfor_input.unstacked_input(1)
element_dtype = pfor_input.get_attr("element_dtype")
# Prepend a dimension to element_shape.
element_shape = _stack_tensor_list_shape(element_shape,
pfor_input.pfor.loop_len_vector)
handle = list_ops.tensor_list_reserve(
element_shape, num_elements, element_dtype=element_dtype)
return wrap(_tile_variant(handle, pfor_input), True)
@RegisterPFor("TensorListElementShape")
def _convert_tensor_list_element_shape(pfor_input):
handle = _untile_variant(pfor_input.stacked_input(0))
shape_type = pfor_input.get_attr("shape_type")
shape = list_ops.tensor_list_element_shape(handle, shape_type)
shape = array_ops.reshape(shape, [-1])
shape = shape[1:]
return wrap(shape, False)
@RegisterPFor("TensorListLength")
def _convert_tensor_list_length(pfor_input):
handle = _untile_variant(pfor_input.stacked_input(0))
return wrap(list_ops.tensor_list_length(handle), False)
def _stack_tensor_list(handle, dtype, loop_len_vector, element_shape=None):
if element_shape is None:
element_shape = list_ops.tensor_list_element_shape(handle, dtypes.int32)
length = list_ops.tensor_list_length(handle)
new_handle = list_ops.tensor_list_reserve(
_stack_tensor_list_shape(element_shape, loop_len_vector), length, dtype)
def _body_fn(i, h):
elem = list_ops.tensor_list_get_item(handle, i, dtype, element_shape)
elem = _stack(elem, loop_len_vector).t
return i + 1, list_ops.tensor_list_set_item(h, i, elem)
return while_loop.while_loop(lambda i, _: i < length, _body_fn,
[0, new_handle])[1]
@RegisterPFor("TensorListGetItem")
def _convert_tensor_list_get_item(pfor_input):
handle, handle_stacked, _ = pfor_input.input(0)
index, index_stacked, _ = pfor_input.input(1)
element_shape = pfor_input.unstacked_input(2)
element_dtype = pfor_input.get_attr("element_dtype")
if handle_stacked:
handle = _untile_variant(handle)
element_shape = _stack_tensor_list_shape(element_shape,
pfor_input.pfor.loop_len_vector)
if index_stacked:
# We use a sequential loop since that may be more efficient than first
      # gathering and concatenating all the elements corresponding to `index`,
# and then doing a gather on it.
def _map_fn(i):
item_i = list_ops.tensor_list_get_item(
handle,
index[i],
element_dtype=element_dtype)
return array_ops.gather(item_i, i)
output = map_fn.map_fn(_map_fn, pfor_input.pfor.all_indices)
return wrap(output, True)
else:
output = list_ops.tensor_list_get_item(
handle,
index,
element_shape=element_shape,
element_dtype=element_dtype)
return wrap(output, True)
else:
assert index_stacked
return wrap(
list_ops.tensor_list_gather(
handle,
index,
element_shape=element_shape,
element_dtype=element_dtype), True)
@RegisterPFor("TensorListSetItem")
def _convert_tensor_list_set_item(pfor_input):
handle, handle_stacked, _ = pfor_input.input(0)
index, index_stacked, _ = pfor_input.input(1)
item, item_stacked, _ = pfor_input.input(2)
if not handle_stacked:
# Special case where we can statically guarantee that the indices are
# disjoint.
if index is pfor_input.pfor.all_indices:
if not item_stacked:
item = _stack(item, pfor_input.pfor.loop_len_vector).t
return wrap(
list_ops.tensor_list_scatter(item, index, input_handle=handle), False)
else:
handle = _stack_tensor_list(handle, item.dtype,
pfor_input.pfor.loop_len_vector)
else:
handle = _untile_variant(handle)
if index_stacked:
# TODO(agarwal): handle this.
raise ValueError("Vectorizing writes to a TensorList with loop "
"variant indices is currently unsupported.")
else:
if not item_stacked:
item = _stack(item, pfor_input.pfor.loop_len_vector).t
handle = list_ops.tensor_list_set_item(handle, index, item)
return wrap(_tile_variant(handle, pfor_input), True)
@RegisterPFor("TensorListPushBack")
def _convert_tensor_list_push_back(pfor_input):
handle, handle_stacked, _ = pfor_input.input(0)
tensor, tensor_stacked, _ = pfor_input.input(1)
if handle_stacked:
handle = _untile_variant(handle)
else:
handle = _stack_tensor_list(handle, tensor.dtype,
pfor_input.pfor.loop_len_vector)
if not tensor_stacked:
tensor = _stack(tensor, pfor_input.pfor.loop_len_vector).t
handle = list_ops.tensor_list_push_back(handle, tensor)
return wrap(_tile_variant(handle, pfor_input), True)
@RegisterPFor("TensorListPopBack")
def _convert_tensor_list_pop_back(pfor_input):
handle = pfor_input.stacked_input(0)
element_shape = pfor_input.unstacked_input(1)
handle = _untile_variant(handle)
if element_shape.shape.ndims == 0:
# Default / unspecified
vectorized_shape = -1
else:
# PopBack has an element shape set when it's the gradient of PushBack, only
# used when the list is uninitialized.
vectorized_shape = array_ops.concat(
[pfor_input.pfor.loop_len_vector, element_shape], axis=0)
output_handle, tensor = gen_list_ops.tensor_list_pop_back(
input_handle=handle, element_dtype=pfor_input.get_attr("element_dtype"),
element_shape=vectorized_shape)
return wrap(output_handle, True), wrap(tensor, True)
@RegisterPFor("TensorListConcatV2")
def _convert_tensor_list_concat_v2(pfor_input):
input_handle = pfor_input.stacked_input(0)
element_shape = pfor_input.unstacked_input(1)
leading_dims = pfor_input.unstacked_input(2)
element_dtype = pfor_input.get_attr("element_dtype")
handle = _untile_variant(input_handle)
length = list_ops.tensor_list_length(handle)
# Note that element_shape attribute can have incomplete shapes. This doesn't
# seem to work well when creating another list and then doing a concat on it.
# Hence we try to find the dynamic shape here.
element_shape = tf_cond.cond(
length > 0, lambda: array_ops.shape(
list_ops.tensor_list_get_item(handle, 0, element_dtype, None)),
lambda: constant_op.constant([0, 0], dtype=dtypes.int32))
# The code below creates a copy of the list with each elements' first two
# dimensions transposed.
new_element_shape = array_ops.concat(
[element_shape[1:2], element_shape[0:1], element_shape[2:]], axis=0)
# Create a new TensorList with elements transposed.
def _transpose_elem(i, h):
elem = list_ops.tensor_list_get_item(handle, i, element_dtype, None)
elem = _transpose_first_two_dims(elem)
return i + 1, list_ops.tensor_list_set_item(h, i, elem)
new_handle = list_ops.tensor_list_reserve(new_element_shape, length,
element_dtype)
new_handle = while_loop.while_loop(lambda i, _: i < length, _transpose_elem,
[0, new_handle])[1]
output, lengths = gen_list_ops.tensor_list_concat_v2(
input_handle=new_handle,
element_dtype=element_dtype,
element_shape=new_element_shape,
leading_dims=leading_dims)
output = _transpose_first_two_dims(output)
return wrap(output, True), wrap(lengths, False)
@RegisterPFor("TensorListStack")
def _convert_tensor_list_stack(pfor_input):
handle = pfor_input.stacked_input(0)
input_shape = pfor_input.unstacked_input(1)
element_dtype = pfor_input.get_attr("element_dtype")
num_elements = pfor_input.get_attr("num_elements")
handle = _untile_variant(handle)
input_shape = _stack_tensor_list_shape(input_shape,
pfor_input.pfor.loop_len_vector)
output = list_ops.tensor_list_stack(
handle,
element_dtype,
element_shape=input_shape,
num_elements=num_elements)
output = _transpose_first_two_dims(output)
return wrap(output, True)
@RegisterPFor("TensorListGather")
def _convert_tensor_list_gather(pfor_input):
handle, handle_stacked, _ = pfor_input.input(0)
index, index_stacked, _ = pfor_input.input(1)
element_shape = pfor_input.unstacked_input(2)
element_dtype = pfor_input.get_attr("element_dtype")
if handle_stacked:
handle = _untile_variant(handle)
element_shape = _stack_tensor_list_shape(element_shape,
pfor_input.pfor.loop_len_vector)
if index_stacked:
# We use a sequential loop since that may be more efficient than first
      # gathering and concatenating all the elements corresponding to `index`,
# and then doing a gather on it.
def _map_fn(i):
item_i = list_ops.tensor_list_gather(
handle,
index[i],
element_dtype=element_dtype)
axis = array_ops.rank(index) - 1
return array_ops.gather(item_i, i, axis=axis)
output = map_fn.map_fn(_map_fn, pfor_input.pfor.all_indices)
return wrap(output, True)
else:
output = list_ops.tensor_list_gather(
handle,
index,
element_shape=element_shape,
element_dtype=element_dtype)
return wrap(output, True)
else:
assert index_stacked
index_shape = array_ops.shape(index)
index = array_ops.reshape(index, [-1])
values = list_ops.tensor_list_gather(
handle, index, element_shape=element_shape, element_dtype=element_dtype)
final_shape = array_ops.concat(
[index_shape, array_ops.shape(values)[1:]], axis=0)
return wrap(array_ops.reshape(values, final_shape), True)
@RegisterPFor("TensorListScatterIntoExistingList")
def _convert_tensor_list_scatter(pfor_input):
pfor_input.stack_inputs([1])
handle, handle_stacked, _ = pfor_input.input(0)
item = pfor_input.stacked_input(1)
indices, indices_stacked, _ = pfor_input.input(2)
if handle_stacked:
handle = _untile_variant(handle)
else:
handle = _stack_tensor_list(handle, item.dtype,
pfor_input.pfor.loop_len_vector)
item = _transpose_first_two_dims(item)
if indices_stacked:
# Pretend the list is a dense tensor:
# list_as_dense: Tensor[list_len, loop_len, ...]
# And indices are a tensor with shape (before transpose):
# indices: Tensor[loop_len, num_scatters]
# The item to scatter has shape (before transpose):
# item: Tensor[loop_len, num_scatters, ...]
#
# We want list_as_dense[indices[i, j], i] = item[i, j]
#
# Since we're not just indexing along the first axis of `list_as_dense`, we
# need to first extract the relevant list entries based on `indices`,
# scatter into them according to the loop index, and re-scatter the chunks
# we updated back into the list.
indices = _transpose_first_two_dims(indices)
indices_flat = array_ops.reshape(indices, [-1])
# In many cases `indices` will be unique across pfor iterations, but this is
# not guaranteed. If there are duplicates, we need to map multiple updates
# to a single chunk extracted from the list. The last update should win.
unique_indices = array_ops.unique(indices_flat)
gathered_items = list_ops.tensor_list_gather(
handle, unique_indices.y, element_dtype=item.dtype,
element_shape=array_ops.shape(item)[1:])
loop_idx = math_ops.range(pfor_input.pfor.loop_len_vector[0])
scatters_per_op = array_ops.shape(indices)[0]
unique_indices_loop_idx = array_ops.reshape(array_ops.tile(
loop_idx[None, :], [scatters_per_op, 1]), [-1])
scatter_indices = array_ops_stack.stack(
[unique_indices.idx, unique_indices_loop_idx],
axis=1)
# This op does *not* guarantee last-update-wins on GPU, so semantics may not
# be exactly preserved for duplicate updates there.
scattered = array_ops.tensor_scatter_nd_update(
tensor=gathered_items,
indices=scatter_indices,
updates=_flatten_first_two_dims(item))
handle = list_ops.tensor_list_scatter(
scattered, unique_indices.y, input_handle=handle)
else:
handle = list_ops.tensor_list_scatter(item, indices, input_handle=handle)
return wrap(_tile_variant(handle, pfor_input), True)
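# Editor's illustration (hypothetical, standalone sketch): the comments inside
# the converter above view the TensorList as a dense tensor of shape
# [list_len, loop_len, ...] and require
#   list_as_dense[indices[i, j], i] = item[i, j]
# with the last update winning when a pfor iteration repeats an index. The
# numpy toy below spells out those reference semantics directly; it is not how
# the converter computes the result (which gathers the unique rows, applies
# tensor_scatter_nd_update, and re-scatters them into the list).
def _example_scatter_into_existing_list(list_as_dense, indices, item):
  """Reference semantics of TensorListScatterIntoExistingList under pfor."""
  import numpy as np  # local import only to keep this sketch self-contained
  out = np.array(list_as_dense, copy=True)
  loop_len, num_scatters = np.shape(indices)
  for j in range(num_scatters):  # later scatters overwrite earlier ones,
    for i in range(loop_len):    # i.e. "the last update should win"
      out[indices[i][j], i] = item[i][j]
  return out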
@RegisterPFor("TensorListFromTensor")
def _convert_tensor_list_from_tensor(pfor_input):
tensor = pfor_input.stacked_input(0)
element_shape = pfor_input.unstacked_input(1)
tensor = _transpose_first_two_dims(tensor)
element_shape = _stack_tensor_list_shape(element_shape,
pfor_input.pfor.loop_len_vector)
handle = list_ops.tensor_list_from_tensor(tensor, element_shape)
return wrap(_tile_variant(handle, pfor_input), True)
@RegisterPFor("TensorScatterUpdate")
def _convert_tensor_scatter_update(pfor_input):
pfor_input.stack_inputs([0, 1, 2])
tensor = pfor_input.stacked_input(0)
indices = pfor_input.stacked_input(1)
updates = pfor_input.stacked_input(2)
indices_shape = array_ops.shape(indices)
indices_rank = array_ops.rank(indices)
loop_length = indices_shape[0]
# Create a loop count range and extend its dimensions to match `indices`.
loop_count_shape = array_ops.tensor_scatter_nd_update(
array_ops.ones([indices_rank], dtype=dtypes.int32), [[0]], [loop_length])
loop_count = array_ops.reshape(math_ops.range(loop_length), loop_count_shape)
# Tile the loop count range for the batch dimensions (all except the first and
# last dimensions of indices).
# Rank(indices) >= 3 always for this function so we always have at least 1.
tile_multiplier = array_ops.tensor_scatter_nd_update(
indices_shape, [[0], [indices_rank - 1]], [1, 1])
meta_index = array_ops.tile(loop_count, tile_multiplier)
# Insert the loop-identifying index.
indices = array_ops.concat([meta_index, indices], axis=-1)
result = array_ops.tensor_scatter_nd_update(tensor, indices, updates)
return wrap(result, True)
# StackV2 conversion is tricky since we don't have arrays of StackV2. So similar
# to TensorArrays, we convert them by changing the dimension of the elements
# inside the stack.
#
# We consider two cases:
#
# 1. StackV2 is constructed and used entirely inside the pfor loop.
# We keep a single Stack and perform the push/pop operations of all the
# iterations in lock-step. We also assume that all the iterations perform these
# operations. In case of dynamic control flow, if only some of the iterations
# try to perform a push/pop, then the conversion may not work correctly and may
# cause undefined behavior.
# TODO(agarwal): test StackV2 with dynamic control flow.
#
# 2. StackV2 is constructed outside the pfor loop.
# Performing stack push/pop in a parallel fashion is ill-defined. However given
# that reading stacks created externally is a common operation when computing
# jacobians, we provide some special semantics here as follows.
# - disallow push operations to the stack
# - pop operations are performed in lock step by all iterations, similar to the
# case when the stack is created inside. A single value is popped during the
# lock-step operation and broadcast to all the iterations. Values in the stack
# are assumed to be loop-invariant.
#
# Some other implementation details:
# We use an ugly logic to find whether values in Stack data structure are
# loop invariant or not. When converting push/pop operations, we keep track of
# whether the last conversion used a stacked value or not (see _stack_cache
# below). As a result if an unstacked value is written first, subsequent stacked
# writes are disallowed when they could have been allowed in theory.
# Map from cache key based on StackV2 handle to a bool indicating whether values
# are stacked or not.
# TODO(agarwal): move _stack_cache inside pfor?
_stack_cache = {}
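# Editor's illustration (hypothetical, standalone sketch): the comment block
# above describes a "first conversion wins" policy: the first StackPushV2 that
# gets converted decides whether values in the Stack are treated as stacked,
# and a later attempt to push a loop-dependent value onto a stack already
# marked loop invariant is rejected (the reverse direction is fine, since an
# unstacked element can always be tiled). The toy below models just that
# bookkeeping with a plain dict, independent of TensorFlow.
def _example_stack_cache_policy(elem_stacked_per_push):
  """Returns the stackedness recorded in a toy cache after each push."""
  toy_cache = {}  # plays the role of _stack_cache for a single stack handle
  recorded = []
  for elem_stacked in elem_stacked_per_push:
    stacked = toy_cache.get("stack", None)
    if stacked is None:
      toy_cache["stack"] = elem_stacked
    elif not stacked and elem_stacked:
      raise ValueError("Stack was marked loop invariant; cannot push a "
                       "loop dependent value.")
    recorded.append(toy_cache["stack"])
  return recorded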
def _stack_cache_key(pfor_input):
"""Create cache key corresponding to a stack handle."""
op_type = pfor_input.op_type
assert op_type in ["StackPushV2", "StackPopV2"], op_type
orig_handle = pfor_input.op.inputs[0]
while orig_handle.op.type in ["Identity", "Enter"]:
orig_handle = orig_handle.op.inputs[0]
assert orig_handle.op.type == "StackV2", orig_handle.op
return ops.get_default_graph(), pfor_input.pfor, orig_handle
def _stack_handle_inside_pfor(handle, pfor_input):
while handle.op.type in ["Identity", "Enter"]:
handle = handle.op.inputs[0]
assert handle.op.type == "StackV2", ("Unable to find StackV2 op. Got %s" %
handle.op)
return pfor_input.pfor.op_is_inside_loop(handle.op)
@RegisterPFor("StackPushV2")
def _convert_stack_push_v2(pfor_input):
handle = pfor_input.unstacked_input(0)
elem, elem_stacked, _ = pfor_input.input(1)
swap_memory = pfor_input.get_attr("swap_memory")
if not _stack_handle_inside_pfor(pfor_input.op.inputs[0], pfor_input):
raise ValueError("StackPushV2 not allowed on stacks created outside pfor.")
stack_cache_key = _stack_cache_key(pfor_input)
stacked = _stack_cache.get(stack_cache_key, None)
if stacked is None:
stacked = elem_stacked
_stack_cache[stack_cache_key] = stacked
else:
# If we previously made it unstacked then we can't revert to being stacked.
if not stacked and elem_stacked:
raise ValueError(
"It looks like the stack was previously determined to be loop "
"invariant, but we are now trying to push a loop dependent value "
"to it. This is currently unsupported.")
if stacked and not elem_stacked:
elem = _stack(elem, pfor_input.pfor.loop_len_vector).t
out = data_flow_ops.stack_push_v2(handle, elem, swap_memory=swap_memory)
return wrap(out, stacked)
# Note that inputs to this converter will be unstacked. However, it should
# still get called since it is a stateful op.
@RegisterPFor("StackPopV2")
def _convert_stack_pop_v2(pfor_input):
handle = pfor_input.unstacked_input(0)
stack_cache_key = _stack_cache_key(pfor_input)
stacked = _stack_cache.get(stack_cache_key, None)
# If a StackPushV2 has not been converted yet, we default to unstacked since
  # the push could be outside of pfor, or the converter may not be called if the
# inputs are unconverted.
if stacked is None:
stacked = False
_stack_cache[stack_cache_key] = False
elem_type = pfor_input.get_attr("elem_type")
out = data_flow_ops.stack_pop_v2(handle, elem_type)
return wrap(out, stacked)
# parsing_ops
@RegisterPFor("DecodeCSV")
def _convert_decode_csv(pfor_input):
lines = pfor_input.stacked_input(0)
record_defaults = [
pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs)
]
field_delim = pfor_input.get_attr("field_delim")
use_quote_delim = pfor_input.get_attr("use_quote_delim")
select_cols = pfor_input.get_attr("select_cols")
if not select_cols:
select_cols = None
return [
wrap(t, True) for t in parsing_ops.decode_csv(
lines,
record_defaults,
field_delim=field_delim,
use_quote_delim=use_quote_delim,
select_cols=select_cols)
]
@RegisterPFor("ParseSingleExample")
def _convert_parse_single_example(pfor_input):
serialized = pfor_input.stacked_input(0)
dense_defaults = [
pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs)
]
sparse_keys = pfor_input.get_attr("sparse_keys")
dense_keys = pfor_input.get_attr("dense_keys")
sparse_types = pfor_input.get_attr("sparse_types")
dense_shapes = pfor_input.get_attr("dense_shapes")
output = gen_parsing_ops.parse_example(
serialized=serialized,
names=[],
dense_defaults=dense_defaults,
sparse_keys=sparse_keys,
dense_keys=dense_keys,
sparse_types=sparse_types,
dense_shapes=dense_shapes)
return [wrap(t, True, True) for t in nest.flatten(output)]
@RegisterPFor("ParseExampleV2")
def _convert_parse_example_v2(pfor_input):
serialized = pfor_input.stacked_input(0)
sparse_keys = pfor_input.unstacked_input(2)
dense_keys = pfor_input.unstacked_input(3)
ragged_keys = pfor_input.unstacked_input(4)
dense_defaults = [
pfor_input.unstacked_input(i) for i in range(5, pfor_input.num_inputs)
]
num_sparse = pfor_input.get_attr("num_sparse")
sparse_types = pfor_input.get_attr("sparse_types")
ragged_value_types = pfor_input.get_attr("ragged_value_types")
ragged_split_types = pfor_input.get_attr("ragged_split_types")
dense_shapes = pfor_input.get_attr("dense_shapes")
if serialized.shape.ndims not in (None, 1):
raise ValueError("ParseExampleV2 can only be converted if `serialized` "
f"is scalar. Received shape: {serialized.shape}.")
output = gen_parsing_ops.parse_example_v2(
serialized=serialized,
names=[],
sparse_keys=sparse_keys,
dense_keys=dense_keys,
ragged_keys=ragged_keys,
dense_defaults=dense_defaults,
num_sparse=num_sparse,
sparse_types=sparse_types,
ragged_value_types=ragged_value_types,
ragged_split_types=ragged_split_types,
dense_shapes=dense_shapes)
return [wrap(t, True, True) for t in nest.flatten(output)]
# functional_ops
def _convert_function_call(func, converter, inputs):
assert isinstance(func.graph, func_graph.FuncGraph), func
assert isinstance(converter, PFor)
graph_outputs = func.graph.outputs[:len(func.function_type.flat_outputs)]
# TODO(agarwal): consider caching this function definition.
@def_function.function
def f(*args):
assert all(isinstance(arg, WrappedTensor) for arg in args), args
assert len(args) == len(func.graph.inputs), (args, func.graph.inputs)
# Map inputs to function arguments.
for inp, arg in zip(func.graph.inputs, args):
converter._add_conversion(inp, arg)
# Convert output tensors.
return tuple([converter._convert_helper(x).t for x in graph_outputs])
call_outputs = f(*inputs)
assert len(call_outputs) == len(graph_outputs)
outputs = []
for call_output, output_tensor in zip(call_outputs, graph_outputs):
func_output = converter._convert_helper(output_tensor)
outputs.append(
wrap(call_output, func_output.is_stacked, func_output.is_sparse_stacked)
)
return outputs
@RegisterPFor("StatefulPartitionedCall")
@RegisterPFor("PartitionedCall")
def _convert_partitioned_call(pfor_input):
func_name = pfor_input.get_attr("f").name
func = pfor_input.op.graph._get_function(compat.as_bytes(func_name))
assert isinstance(func.graph, func_graph.FuncGraph), (
"Could not find FuncGraph object for %s. Got func %s" % (func_name, func))
pfor = pfor_input.pfor
converter = PFor(
loop_var=pfor.loop_var,
loop_len=pfor.loop_len_vector[0],
pfor_ops=func.graph.get_operations(),
fallback_to_while_loop=pfor.fallback_to_while_loop,
all_indices=pfor.all_indices,
all_indices_partitioned=pfor.all_indices_partitioned,
pfor_config=pfor.pfor_config)
return _convert_function_call(func, converter, pfor_input.inputs)
def _partition_inputs_for_indices(inputs, indices):
new_inputs = []
for inp in inputs:
if inp.is_stacked:
new_inputs.append(wrap(array_ops.gather(inp.t, indices), True))
else:
new_inputs.append(inp)
return new_inputs
def _outputs_for_branch(func_name, indices, pfor_input, inputs):
if indices is None:
indices = pfor_input.pfor.all_indices
partitioned = pfor_input.pfor.all_indices_partitioned
else:
partitioned = True
func = pfor_input.op.graph._get_function(func_name)
converter = PFor(
loop_var=pfor_input.pfor.loop_var,
loop_len=array_ops.size(indices),
pfor_ops=func.graph.get_operations(),
fallback_to_while_loop=pfor_input.pfor.fallback_to_while_loop,
all_indices=indices,
all_indices_partitioned=partitioned,
pfor_config=pfor_input.pfor.pfor_config)
outputs = _convert_function_call(func, converter, inputs)
stacked_outputs = []
for out in outputs:
if not out.is_stacked:
stacked_outputs.append(_stack(out.t, [array_ops.size(indices)]).t)
else:
stacked_outputs.append(out.t)
return stacked_outputs
# TODO(agarwal): Currently the converted code aggressively tiles loop variant
# outputs from the then/else branches. Instead, it could do so only if at least
# one of the branch outputs is loop variant.
@RegisterPFor("StatelessIf")
@RegisterPFor("If")
def _convert_if(pfor_input):
cond, cond_stacked, _ = pfor_input.input(0)
inputs = pfor_input.inputs[1:]
then_branch = pfor_input.get_attr("then_branch")
else_branch = pfor_input.get_attr("else_branch")
if cond_stacked:
cond_int = math_ops.cast(cond, dtypes.int32)
# Compute loop indices for the different branches
false_indices, true_indices = data_flow_ops.dynamic_partition(
pfor_input.pfor.all_indices, cond_int, 2)
# Compute indices for cond being True or False.
if pfor_input.pfor.all_indices_partitioned:
else_indices, then_indices = data_flow_ops.dynamic_partition(
math_ops.range(pfor_input.pfor.loop_len_vector[0]),
cond_int, 2)
else:
else_indices, then_indices = false_indices, true_indices
# Partition inputs
then_inputs = _partition_inputs_for_indices(inputs, then_indices)
else_inputs = _partition_inputs_for_indices(inputs, else_indices)
# Convert "then" branch.
then_outputs = _outputs_for_branch(then_branch.name, true_indices,
pfor_input, then_inputs)
# Convert "else" branch.
else_outputs = _outputs_for_branch(else_branch.name, false_indices,
pfor_input, else_inputs)
assert len(then_outputs) == len(else_outputs)
# Note that if the "then" and "else" branches are updating the same state,
# and possibly reading them as well, it could lead to undefined behavior
# since the ordering of those operations is not well defined.
# One possibility is to order all the "then" branches to execute before all
# the "else" branches so that the side-effects in the former are visible to
# the latter. For now, we leave that as undefined behavior.
outputs = []
# Merge outputs
for then_output, else_output in zip(then_outputs, else_outputs):
out = data_flow_ops.dynamic_stitch([then_indices, else_indices],
[then_output, else_output])
outputs.append(wrap(out, True))
return outputs
else:
outputs = tf_cond.cond(
cond,
lambda: _outputs_for_branch(then_branch.name, None, pfor_input, inputs),
lambda: _outputs_for_branch(else_branch.name, None, pfor_input, inputs))
return [wrap(t, True) for t in outputs]
@RegisterPFor("Case")
@RegisterPFor("StatelessCase")
def _convert_stateless_case(pfor_input):
branch_idx, is_stacked, _ = pfor_input.input(0)
branches = pfor_input.get_attr("branches")
inputs = pfor_input.inputs[1:]
if is_stacked:
logging.info("Running stacked flow")
# Compute loop indices for the different branches
switch_indices = data_flow_ops.dynamic_partition(
pfor_input.pfor.all_indices, branch_idx, len(branches))
if pfor_input.pfor.all_indices_partitioned:
partitioned_indices = data_flow_ops.dynamic_partition(
math_ops.range(pfor_input.pfor.loop_len_vector[0]), branch_idx,
len(branches))
else:
partitioned_indices = switch_indices
# Partition inputs
input_list = []
for indices in partitioned_indices:
input_list.append(_partition_inputs_for_indices(inputs, indices))
outputs = []
for (b, indices, inputs) in zip(branches, switch_indices, input_list):
out = _outputs_for_branch(b.name, indices, pfor_input, inputs)
outputs.extend(out)
out = data_flow_ops.dynamic_stitch(partitioned_indices, outputs)
return [wrap(out, True)]
else:
new_branches = []
for b in branches:
def new_function(func=b.name):
return _outputs_for_branch(func, None, pfor_input,
pfor_input.inputs[1:])
new_branches.append(new_function)
    outputs = control_flow_switch_case.switch_case(branch_idx, new_branches)
return [wrap(t, True) for t in outputs]
class WhileV2:
"""Object for vectorizing V2 while_loop op."""
def __init__(self, pfor_input):
self._pfor_input = pfor_input
self._pfor = pfor_input.pfor
cond_func_name = pfor_input.get_attr("cond").name
self._cond_func = pfor_input.op.graph._get_function(compat.as_bytes(
cond_func_name))
body_func_name = pfor_input.get_attr("body").name
self._body_func = pfor_input.op.graph._get_function(compat.as_bytes(
body_func_name))
if self._cond_func is None or self._body_func is None:
raise ValueError("Error extracting cond and body functions for op "
f"{self._pfor_input.op}.")
# Indices of inputs that are passed unchanged through the while loop body.
# Typically these are tensors captured from outside the body context.
self._body_pass_through_indices = set()
for i, (inp, out) in enumerate(zip(self._body_func.graph.inputs,
self._body_func.graph.outputs)):
if id(inp) == id(out):
self._body_pass_through_indices.add(i)
self._parallel_iterations = self._pfor_input.get_attr("parallel_iterations")
def _output_shapes(self):
# Calculate output shape for vectorized loop. This will be used as
# shape_invariant. Merges shape inference outputs with the `output_shapes`
# attribute of the op.
output_shapes = [out.shape for out in self._pfor_input.op.outputs]
shapes = self._pfor_input.get_attr("output_shapes")
if not shapes:
shapes = [tensor_shape.TensorShape(None) for _ in output_shapes]
else:
shapes = [tensor_shape.TensorShape(shape) for shape in shapes]
for i, shape in enumerate(shapes):
shape = shape.merge_with(output_shapes[i])
pfor_input = self._pfor_input.input(i)
if pfor_input.is_stacked:
if _is_variant_with_internal_stacking(pfor_input.t):
shape = tensor_shape.TensorShape([]).concatenate(shape)
else:
shape = tensor_shape.TensorShape([None]).concatenate(shape)
output_shapes[i] = shape
assert len(output_shapes) == self._pfor_input.num_inputs
return output_shapes
def _init_values(self):
"""Create arguments passed to converted while_loop."""
loop_len = self._pfor.loop_len_vector[0]
inputs = []
# TensorArrays for outputs of converted while loop
output_tas = []
with ops.name_scope("while_init"):
for inp in self._pfor_input.inputs:
inputs.append(inp.t)
variant_type_id = _variant_type_id(inp.t)
if variant_type_id in _INTERNAL_STACKING_TYPE_IDS:
if variant_type_id != full_type_pb2.TFT_ARRAY:
raise NotImplementedError(
"While loop conversion is only supported for TensorLists. Got "
f"another variant {inp.t}, probably an optional. Please file "
"a bug.")
# For TensorLists, the input format is:
#
# List[user_list_len, Tensor[loop_len, ...]]
#
# rather than the usual
#
# Tensor[loop_len, ...]
#
# The body of the loop will take and return lists in this "internal
# vectorization" format, so we want to keep it that way as much as
# possible. We'll accumulate finished iterations (only relevant for
# pfor-loop-variant while_loop conditions) in an accumulator with
# type :
#
# List[user_list_len, List[loop_len, Tensor[...]]]
#
# This means that each while_loop iteration, we'll iterate over the
# length of the TensorList, dividing done/remaining pfor loop indices
# and scattering the done indices into the inner nested list of the
# accumulator.
element_shape = list_ops.tensor_list_element_shape(
inp.t, dtypes.int32)
if inp.is_stacked:
# Shapes may be tf.constant(-1) for fully dynamic, in which case
# slicing is an error.
element_shape = tf_cond.cond(
math_ops.equal(array_ops.rank(element_shape), 0),
lambda: element_shape,
lambda: element_shape[1:])
dtype = _parse_variant_shapes_and_types(inp.t)[0].dtype
def _init_loop_body(index, output_ta):
output_ta = output_ta.write(
index,
list_ops.tensor_list_reserve(element_shape, loop_len, dtype))
return index + 1, output_ta
length = list_ops.tensor_list_length(inp.t)
output_ta = tensor_array_ops.TensorArray(
inp.t.dtype, # Variant; this is a nested TensorList
size=length,
dynamic_size=True,
infer_shape=False)
_, output_ta = while_loop.while_loop(lambda index, _: index < length,
_init_loop_body, [0, output_ta])
else:
output_ta = tensor_array_ops.TensorArray(
inp.t.dtype,
size=loop_len,
dynamic_size=False,
infer_shape=True)
output_tas.append(output_ta)
# See documentation for __call__ for the structure of init_values.
indices = (
math_ops.range(self._pfor.loop_len_vector[0])
if self._pfor.all_indices_partitioned else self._pfor.all_indices)
return [True, indices] + inputs + output_tas
def _process_cond_unstacked(self, conditions, indices, inputs, output_tas):
"""Handles case when condition is pfor loop invariant."""
# Note that all iterations end together. So we don't need to partition the
# inputs.
not_all_done = array_ops.reshape(conditions, [])
return not_all_done, indices, inputs, output_tas
def _process_cond_stacked(self, conditions, indices, inputs, inputs_stacked,
output_tas):
"""Handles case when condition is pfor loop dependent."""
# Compute if all iterations are done.
not_all_done = math_ops.reduce_any(conditions)
conditions_int = math_ops.cast(conditions, dtypes.int32)
# Partition the indices.
done_indices, new_indices = data_flow_ops.dynamic_partition(
indices, conditions_int, 2)
new_inputs = []
new_output_tas = []
for i, (inp, stacked) in enumerate(zip(inputs, inputs_stacked)):
pass_through = i in self._body_pass_through_indices
if not pass_through and _variant_type_id(inp) == full_type_pb2.TFT_ARRAY:
shape_and_type = _parse_variant_shapes_and_types(inp)[0]
element_shape = list_ops.tensor_list_element_shape(inp, dtypes.int32)
user_list_len = list_ops.tensor_list_length(inp)
def _split_vectorized_ta_element(index, new_inp, new_out_ta):
elem = list_ops.tensor_list_get_item(inp, index, shape_and_type.dtype,
element_shape)
if stacked:
done_elem, new_elem = data_flow_ops.dynamic_partition(
elem, conditions_int, 2)
new_inp = list_ops.tensor_list_set_item(new_inp, index, new_elem)
else:
done_elem = _stack(elem, [array_ops.size(done_indices)]).t
done_accum = new_out_ta.read(index)
done_accum = list_ops.tensor_list_scatter(
tensor=done_elem, indices=done_indices, input_handle=done_accum)
new_out_ta = new_out_ta.write(index, done_accum)
return index + 1, new_inp, new_out_ta
length = list_ops.tensor_list_length(inp)
new_inp = list_ops.tensor_list_reserve(
tensor_shape.TensorShape([None])
+ tensor_shape.TensorShape(shape_and_type.shape)[1:],
user_list_len, shape_and_type.dtype)
_, new_inp, out_ta = while_loop.while_loop(
lambda index, unused_new_inp, unused_new_out_ta: index < length,
_split_vectorized_ta_element, [0, new_inp, output_tas[i]])
else:
# Partition the inputs.
if stacked:
done_inp, new_inp = data_flow_ops.dynamic_partition(
inp, conditions_int, 2)
else:
if not pass_through:
done_inp = _stack(inp, [array_ops.size(done_indices)]).t
new_inp = inp
out_ta = output_tas[i]
if not pass_through:
# Note that done_indices can be empty. done_inp should also be empty
# in that case.
out_ta = out_ta.scatter(done_indices, done_inp)
new_inputs.append(new_inp)
new_output_tas.append(out_ta)
assert len(new_output_tas) == len(output_tas)
assert len(new_inputs) == len(inputs)
return not_all_done, new_indices, new_inputs, new_output_tas
def _process_body(self, inputs_stacked, new_indices, cond_stacked,
new_inputs, not_all_done):
"""Convert the body function."""
# This is used to store the indices of inputs to the while op that need to
# be stacked. This stacking may be needed in cases where the input to the
# while_loop is loop_invariant but the corresponding output is not.
mismatching_stacked_indices = []
def true_fn():
"""Converts the body function for all but last iteration."""
wrapped_inputs = [wrap(inp, stacked) for inp, stacked in
zip(new_inputs, inputs_stacked)]
# Note the iterative process below to figure out loop invariance.
# Here we iterate on vectorization process till a fixed point. The issue
# is that the while body can take pfor loop invariant inputs but return
# loop variant outputs. For any loop variant output, the corresponding
# input has to be then made loop variant (since subsequent while
# iterations will need to see loop variant values).
# However once we make a new input loop variant, we might make other
# outputs loop variant. Hence we need to iterate till we get fixed point.
while True:
if self._pfor.all_indices_partitioned:
indices = array_ops.gather(self._pfor.all_indices, new_indices)
else:
indices = new_indices
body_pfor = PFor(
loop_var=self._pfor.loop_var,
loop_len=array_ops.size(new_indices),
pfor_ops=self._body_func.graph.get_operations(),
fallback_to_while_loop=self._pfor.fallback_to_while_loop,
all_indices=indices,
all_indices_partitioned=(self._pfor.all_indices_partitioned or
cond_stacked),
pfor_config=self._pfor.pfor_config)
stacking_mismatch = False
outputs = _convert_function_call(self._body_func,
body_pfor,
wrapped_inputs)
for i, (out, inp) in enumerate(zip(outputs, wrapped_inputs)):
if out.is_stacked != inp.is_stacked:
stacking_mismatch = True
mismatching_stacked_indices.append(i)
stacked = _stack(inp.t, [array_ops.size(new_indices)])
if inp.t.dtype == dtypes.variant:
stacked = wrap(
_tile_variant_with_length(stacked.t,
[array_ops.size(new_indices)]))
wrapped_inputs[i] = stacked
if not stacking_mismatch:
if mismatching_stacked_indices:
# We needed to stack some inputs. This code will be abandoned and
# should not get executed. Hence we simply return `new_inputs` to
# make sure the graph construction code completes.
with ops.control_dependencies([
control_flow_assert.Assert(
False, ["pfor ERROR: this branch should never execute"])
]):
return [array_ops.identity(x) for x in new_inputs]
else:
return [out.t for out in outputs]
# If all are done, we simply return `new_inputs`. Else we need to run the
# body function.
return tf_cond.cond(
not_all_done,
true_fn,
lambda: list(new_inputs)), mismatching_stacked_indices
def __call__(self):
"""Converter for the V2 while_loop.
The conversion of a while_loop is another while_loop.
The arguments to this converted while_loop are as follows:
not_all_done: Boolean scalar Tensor indicating if all the pfor iterations
are done.
indices: int32 1-D Tensor storing the id of the pfor iterations that are not
done.
args: Remaining arguments. These can be divided into 2 categories:
- The first set of arguments correspond one-to-one to the inputs to the
unvectorized while_loop.
- The second set are TensorArrays, corresponding one-to-one to each output
of the unvectorized while_loop. Each TensorArray has `PFor.loop_len`
elements, i.e. the number of pfor iterations. At the end, the i'th
element of each TensorArray will contain the output computed by the i'th
iteration of pfor. Note that elements can be written into these tensors
arrays in any order, depending on when the corresponding pfor iteration
is done.
In each iteration, the while_loop body recomputes the condition for all
active pfor iterations to see which of them are now done. It then partitions
all the inputs and passes them along to the converted body. Values for all
the iterations that are done are written to TensorArrays indexed by the pfor
iteration number. When all iterations are done, the TensorArrays are stacked
to get the final value.
Returns:
List of converted outputs.
"""
output_shapes = self._output_shapes()
# Note that we use these lists as a hack since we need the `body` to compute
# these values during construction of the while_loop graph.
cond_is_stacked = [None]
indices_to_stack = []
def cond(not_all_done, *_):
return not_all_done
def body(not_all_done, indices, *args):
# See documentation for __call__ for the structure of *args.
num_inputs = self._pfor_input.num_inputs
inputs = args[:num_inputs]
output_tas = args[num_inputs:]
inputs_stacked = [x.is_stacked for x in self._pfor_input.inputs]
assert len(inputs) >= len(output_tas)
assert len(inputs) == len(inputs_stacked)
# Convert condition
with ops.name_scope("while_cond"):
# Note that we set all_indices_partitioned to True here. At this point
# we don't know if indices will be partitioned. Hence we use the
# conservative value.
cond_pfor = PFor(
loop_var=self._pfor.loop_var,
loop_len=array_ops.size(indices),
pfor_ops=self._cond_func.graph.get_operations(),
fallback_to_while_loop=self._pfor.fallback_to_while_loop,
all_indices=indices,
all_indices_partitioned=True,
pfor_config=self._pfor.pfor_config)
wrapped_inputs = [wrap(inp, stacked) for inp, stacked
in zip(inputs, inputs_stacked)]
conditions, cond_stacked, _ = _convert_function_call(
self._cond_func,
cond_pfor,
wrapped_inputs)[0]
cond_is_stacked[0] = cond_stacked
# Recompute the new condition, write outputs of done iterations, and
# partition the inputs if needed.
if not cond_stacked:
(not_all_done, new_indices, new_inputs,
new_output_tas) = self._process_cond_unstacked(conditions, indices,
inputs, output_tas)
else:
(not_all_done, new_indices, new_inputs,
new_output_tas) = self._process_cond_stacked(conditions, indices,
inputs, inputs_stacked,
output_tas)
# Convert body
with ops.name_scope("while_body"):
# Compute the outputs from the body.
new_outputs, mismatching_stacked_indices = self._process_body(
inputs_stacked, new_indices, cond_stacked, new_inputs, not_all_done)
indices_to_stack[:] = mismatching_stacked_indices
for i, new_output in enumerate(new_outputs):
new_output.set_shape(output_shapes[i])
new_args = ([not_all_done, new_indices] + new_outputs +
list(new_output_tas))
return tuple(new_args)
# Note that we run the code below in a function since we might abandon the
# generated code in cases where the conversion dictates that some inputs be
# further stacked. Hence we run the graph construction using
# `get_concrete_function` and avoid calling the constructed function if not
# needed.
@def_function.function
def while_fn():
# Create init_values that will be passed to the while_loop.
init_values = self._init_values()
ta_shape_invariants = [tensor_shape.TensorShape([]) for _ in
self._pfor_input.outputs]
shape_invariants = (
[tensor_shape.TensorShape([]), tensor_shape.TensorShape([None])]
+ output_shapes + ta_shape_invariants)
while_outputs = while_loop.while_loop(
cond,
body,
init_values,
shape_invariants=shape_invariants,
parallel_iterations=self._parallel_iterations)
if indices_to_stack:
# This function will be abandoned.
return while_outputs
else:
num_inputs = self._pfor_input.num_inputs
new_inputs = while_outputs[2:num_inputs+2]
output_tas = while_outputs[num_inputs+2:]
assert cond_is_stacked[0] is not None
outputs = []
for i, inp in enumerate(new_inputs):
if cond_is_stacked[0]:
if i in self._body_pass_through_indices:
outputs.append(init_values[i + 2])
else:
ta = output_tas[i]
if _variant_type_id(inp) == full_type_pb2.TFT_ARRAY:
shape_and_type = _parse_variant_shapes_and_types(inp)[0]
length = list_ops.tensor_list_length(inp)
# We have been accumulating values in a:
#
# List[user_list_len, List[loop_len, Tensor[...]]]
#
# We want to return an output in the same format as the input:
#
# List[user_list_len, Tensor[loop_len, ...]]
#
# So we need to loop over the list and stack its contents.
def _stack_loop_body(index, output_list):
current_value = ta.read(index)
output_list = list_ops.tensor_list_set_item(
output_list, index,
list_ops.tensor_list_stack(
current_value, shape_and_type.dtype))
return index + 1, output_list
output_list = list_ops.tensor_list_reserve(
tensor_shape.TensorShape(shape_and_type.shape), length,
shape_and_type.dtype)
_, output_list = while_loop.while_loop(
lambda index, _: index < length, _stack_loop_body,
[0, output_list])
outputs.append(output_list)
else:
outputs.append(ta.stack())
else:
outputs.append(inp)
return outputs
_ = while_fn.get_concrete_function()
if indices_to_stack:
# Need to abandon the current conversion, stack some inputs and restart.
self._pfor_input.stack_inputs(
stack_indices=indices_to_stack, tile_variants=True)
# Note that this call will recurse at most one time. The first call will
# do the required stacking, based on the iterative procedure in
# _process_body, and the next invocation to __call__ should not need to do
# any more stacking.
# We invoke `self()` here as a way to discard any corrupted state.
return self()
else:
outputs = while_fn()
wrapped_outputs = []
for i, (out, inp) in enumerate(zip(outputs, self._pfor_input.inputs)):
if i not in self._body_pass_through_indices and cond_is_stacked[0]:
wrapped_outputs.append(wrap(out, True))
else:
wrapped_outputs.append(wrap(out, inp.is_stacked))
return wrapped_outputs
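# Editor's illustration (hypothetical helper, not used by the converter): the
# docstring of WhileV2.__call__ above describes the arguments of the converted
# while_loop as [not_all_done, indices] + inputs + output_tas. The sketch
# below just slices a flat argument list that way, mirroring how `body` and
# `while_fn` unpack their arguments, to make the layout concrete.
def _example_while_v2_arg_layout(flat_args, num_inputs):
  """Splits `flat_args` into (not_all_done, indices, inputs, output_tas)."""
  not_all_done = flat_args[0]
  indices = flat_args[1]
  inputs = list(flat_args[2:2 + num_inputs])
  output_tas = list(flat_args[2 + num_inputs:])
  return not_all_done, indices, inputs, output_tas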
@RegisterPFor("StatelessWhile")
@RegisterPFor("While")
def _convert_while(pfor_input):
converter = WhileV2(pfor_input)
return converter()
# spectral_ops
@RegisterPForWithArgs("FFT", gen_spectral_ops.fft)
@RegisterPForWithArgs("FFT2D", gen_spectral_ops.fft2d)
@RegisterPForWithArgs("FFT3D", gen_spectral_ops.fft3d)
@RegisterPForWithArgs("IFFT", gen_spectral_ops.ifft)
@RegisterPForWithArgs("IFFT2D", gen_spectral_ops.ifft2d)
@RegisterPForWithArgs("IFFT3D", gen_spectral_ops.ifft3d)
def _convert_fft(pfor_input, _, op_func):
return wrap(op_func(pfor_input.stacked_input(0)), True)
@RegisterPForWithArgs("RFFT", gen_spectral_ops.rfft, "Tcomplex")
@RegisterPForWithArgs("RFFT2D", gen_spectral_ops.rfft2d, "Tcomplex")
@RegisterPForWithArgs("RFFT3D", gen_spectral_ops.rfft3d, "Tcomplex")
@RegisterPForWithArgs("IRFFT", gen_spectral_ops.irfft, "Treal")
@RegisterPForWithArgs("IRFFT2D", gen_spectral_ops.irfft2d, "Treal")
@RegisterPForWithArgs("IRFFT3D", gen_spectral_ops.irfft3d, "Treal")
def _convert_rfft(pfor_input, _, op_func, attr_name):
inp = pfor_input.stacked_input(0)
fft_length = pfor_input.unstacked_input(1)
attr = pfor_input.get_attr(attr_name)
return wrap(op_func(inp, fft_length, attr), True)
|
PypiClean
|
/otpstore-0.1.3.tar.gz/otpstore-0.1.3/CONTRIBUTING.rst
|
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every little bit
helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/suryasankar/otpstore/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug" and "help
wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
otpstore could always use more documentation, whether as part of the
official otpstore docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/suryasankar/otpstore/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `otpstore` for local development.
1. Fork the `otpstore` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/otpstore.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv otpstore
$ cd otpstore/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the
tests, including testing other Python versions with tox::
$ flake8 otpstore tests
$ python setup.py test or pytest
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 3.5, 3.6, 3.7 and 3.8, and for PyPy. Check
https://travis-ci.com/suryasankar/otpstore/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ pytest tests.test_otpstore
Deploying
---------
A reminder for the maintainers on how to deploy.
Make sure all your changes are committed (including an entry in HISTORY.rst).
Then run::
$ bump2version patch # possible: major / minor / patch
$ git push
$ git push --tags
Travis will then deploy to PyPI if tests pass.
|
PypiClean
|
/TermEmulator-1.0.2.tar.gz/TermEmulator-1.0.2/README.rst
|
TermEmulator
============
``TermEmulator`` is a pure Python module for emulating VT100 terminal programs.
It handles VT100 special characters and the most important escape sequences.
It also handles graphics rendition, which specifies text style (e.g. bold, italics),
foreground color and background color.
The handled escape sequences are ``CUU``, ``CUD``, ``CUF``, ``CUB``, ``CHA``,
``CUP``, ``ED``, ``EL``, ``VPA`` and ``SGR``.
Development
===========
``TermEmulator`` source code and tracker are at https://github.com/sivachandran/TermEmulator.
|
PypiClean
|
/romeapi-0.1.5.tar.gz/romeapi-0.1.5/cybex-rome/connect.py
|
import requests
import struct
from binascii import unhexlify
from datetime import datetime
import time
from .core import operations, signedtransactions
# from bitsharesbase import operations, signedtransactions
from .core.ecdsa import quick_sign_message
import graphenebase.ecdsa
""" CCXT
The unified ccxt API is a subset of methods common among the exchanges. It currently contains the following methods:
fetch_markets (): Fetches a list of all available markets from an exchange and returns an array of markets (objects with properties such as assetPair, base, quote etc.). Some exchanges do not have means for obtaining a list of markets via their online API. For those, the list of markets is hardcoded.
load_markets ([reload]): Returns the list of markets as an object indexed by assetPair and caches it with the exchange instance. Returns cached markets if loaded already, unless the reload = true flag is forced.
fetch_order_book (assetPair[, limit = undefined[, params = {}]]): Fetch L2/L3 order book for a particular market trading assetPair.
fetchL2OrderBook (assetPair[, limit = undefined[, params]]): Level 2 (price-aggregated) order book for a particular assetPair.
fetch_trades (assetPair[, since[, [limit, [params]]]]): Fetch recent trades for a particular trading assetPair.
fetch_ticker (assetPair): Fetch latest ticker data by trading assetPair.
fetch_balance (): Fetch Balance.
create_order (assetPair, type, side, amount[, price[, params]])
create_limit_buy_order (assetPair, amount, price[, params])
createLimitSellOrder (assetPair, amount, price[, params])
createMarketBuyOrder (assetPair, amount[, params])
createMarketSellOrder (assetPair, amount[, params])
cancelOrder (id[, assetPair[, params]])
fetchOrder (id[, assetPair[, params]])
fetchOrders ([assetPair[, since[, limit[, params]]]])
fetchOpenOrders ([assetPair[, since, limit, params]]]])
fetchClosedOrders ([assetPair[, since[, limit[, params]]]])
fetchMyTrades ([assetPair[, since[, limit[, params]]]])
"""
graphenebase.signedtransactions.sign_message = quick_sign_message
class CybexAPIException(Exception):
def __init__(self, response):
self.code = 0
try:
json_res = response.json()
except ValueError:
self.message = 'Invalid JSON error message from Cybex: {}'.format(response.text)
else:
self.code = json_res['code']
self.message = json_res['msg']
self.status_code = response.status_code
self.response = response
self.request = getattr(response, 'request', None)
def __str__(self): # pragma: no cover
return 'API error (code=%s): %s' % (self.code, self.message)
class CybexRequestException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return 'CybexRequestException: %s' % self.message
class CybexSignerException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return 'CybexSignerException: %s' % self.message
class CybexException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return 'CybexException: %s' % self.message
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
class Pair:
def __init__(self, pair, available_assets):
if "/" in pair:
_assets = pair.split("/")
self.base = {"assetPair": _assets[0]}
self.quote = {"assetPair": _assets[1]}
for asset in available_assets:
if asset['assetName'] == self.quote['assetPair']:
self.quote.update({
"id": asset["assetId"],
"precision": asset["precision"]
})
if asset['assetName'] == self.base['assetPair']:
self.base.update({
"id": asset["assetId"],
"precision": asset["precision"]
})
def get_dict(self, side, quantity, price):
if side == 'buy':
return self.buy(quantity, price)
if side == 'sell':
return self.sell(quantity, price)
return None
    def sell(self, amount, price):
        # `amount` is denominated in the base asset being sold; the proceeds
        # (amount * price) are denominated in the quote asset, mirroring buy().
        return {
            "amount_to_sell": {
                "amount": int(
                    round(float(amount) * 10 ** self.base["precision"])
                ),
                "asset_id": self.base["id"]
            },
            "min_to_receive": {
                "amount": int(
                    round(
                        float(amount)
                        * float(price)
                        * 10 ** self.quote["precision"]
                    )
                ),
                "asset_id": self.quote["id"]
            }
        }
def buy(self, amount, price):
return {
"amount_to_sell": {
"amount": int(
round(float(amount) * float(price) * 10 ** self.quote["precision"])
),
"asset_id": self.quote["id"]
},
"min_to_receive": {
"amount": int(
round(
float(amount) * 10 ** self.base["precision"]
)
),
"asset_id": self.base["id"]
}
}
def get_block_params(ref_block_id):
ref_block_num = int(ref_block_id[:8], 16) & 0xFFFF
ref_block_prefix = struct.unpack_from("<I", unhexlify(ref_block_id), 4)[0]
return ref_block_num, ref_block_prefix
def time_string(time_param):
if isinstance(time_param, datetime):
return time_param.strftime(TS_FORMAT)
return time_param
class Signer:
def __init__(self, account, private_key, refData):
self.account = account
self.private_key = private_key
self.chain = {'chain_id': refData['chainId'], 'core_symbol': 'CYB', 'prefix': 'CYB'}
self.fees = refData['fees']
self.pairs = refData['availableAssets']
self.ref_block_num, self.ref_block_prefix = get_block_params(refData['refBlockId'])
def signed(self, op, tx_expiration):
tx = signedtransactions.Signed_Transaction(
operations=[op],
ref_block_num=self.ref_block_num,
ref_block_prefix=self.ref_block_prefix,
expiration=tx_expiration
)
tx.sign([self.private_key], self.chain)
return tx
def prepare_order_message(self, asset_pair, side, quantity, price):
if side != 'buy' and side != 'sell':
print("Unsupported side", side)
return None
is_buy = side == 'buy'
# time calculation
utcnow = datetime.utcnow()
# calculate the time difference between utc now and utc end of day
exp_utc = datetime(utcnow.year, utcnow.month, utcnow.day, 23, 59, 59)
# this is the local time, use to timestamp to calculate utc timestamp
exp = datetime.now() + (exp_utc - utcnow)
pair = Pair(asset_pair, self.pairs)
buy_sell = pair.get_dict(side, quantity, price)
op_data = {
"fee": {'amount': self.fees['newFee'], 'asset_id': self.fees['feeAssetId']},
"seller": self.account,
"amount_to_sell": buy_sell['amount_to_sell'],
"min_to_receive": buy_sell['min_to_receive'],
"expiration": exp_utc.strftime(TS_FORMAT),
"fill_or_kill": False,
}
op = operations.Limit_order_create(**op_data)
        tx_expiration_utc_timestamp = time.time() + 3600 * 23
        tx_expiration = datetime.utcfromtimestamp(tx_expiration_utc_timestamp)
signed_tx = self.signed(op, tx_expiration.strftime(TS_FORMAT))
signed_tx_json = signed_tx.json()
fee = {
'assetId': signed_tx_json['operations'][0][1]['fee']['asset_id'],
'amount': signed_tx_json['operations'][0][1]['fee']['amount']
}
amountToSell = {
'assetId': signed_tx_json['operations'][0][1]['amount_to_sell']['asset_id'],
'amount': signed_tx_json['operations'][0][1]['amount_to_sell']['amount']
}
minToReceive = {
'assetId': signed_tx_json['operations'][0][1]['min_to_receive']['asset_id'],
'amount': signed_tx_json['operations'][0][1]['min_to_receive']['amount']
}
order_msg = {
'transactionType': 'NewLimitOrder',
'transactionId': signed_tx.id,
'refBlockNum': signed_tx_json['ref_block_num'],
'refBlockPrefix': signed_tx_json['ref_block_prefix'],
            'txExpiration': int(tx_expiration_utc_timestamp),
'fee': fee,
'seller': signed_tx_json['operations'][0][1]['seller'],
'amountToSell': amountToSell,
'minToReceive': minToReceive,
'expiration': int(exp.timestamp()),
'fill_or_kill': int(signed_tx_json['operations'][0][1]['fill_or_kill']),
'signature': signed_tx_json['signatures'][0],
'is_buy': int(is_buy)
}
return order_msg
def prepare_cancel_message(self, trxid):
op = operations.Limit_order_cancel(**{
'fee': {'amount': self.fees['cancelFee'], 'asset_id': self.fees['feeAssetId']},
'fee_paying_account': self.account,
'order': '1.7.0',
'extensions': [[6, {'trx_id': trxid}]]})
        tx_expiration_utc_timestamp = time.time() + 3600 * 23
        tx_expiration = datetime.utcfromtimestamp(tx_expiration_utc_timestamp)
signed_tx = self.signed(op, tx_expiration.strftime(TS_FORMAT))
signed_tx_json = signed_tx.json()
fee = {
'assetId': signed_tx_json['operations'][0][1]['fee']['asset_id'],
'amount': signed_tx_json['operations'][0][1]['fee']['amount']
}
cancel_msg = {
'transactionType': 'Cancel',
'transactionId': signed_tx.id,
'originalTransactionId': trxid,
'refBlockNum': signed_tx_json['ref_block_num'],
'refBlockPrefix': signed_tx_json['ref_block_prefix'],
            'txExpiration': int(tx_expiration_utc_timestamp),
'orderId': '0',
'fee': fee,
'feePayingAccount': self.account,
'signature': signed_tx_json['signatures'][0]
}
print(cancel_msg)
return cancel_msg
def prepare_cancel_all_message(self, asset_pair):
pair = Pair(asset_pair, self.pairs)
op = operations.Cancel_all(**{
"fee": {'amount': self.fees['cancelAllFee'], 'asset_id': self.fees['feeAssetId']},
"seller": self.account,
"sell_asset_id": pair.quote['id'],
"receive_asset_id": pair.base['id']
})
        tx_expiration_utc_timestamp = time.time() + 3600 * 23
        tx_expiration = datetime.utcfromtimestamp(tx_expiration_utc_timestamp)
signed_tx = self.signed(op, tx_expiration.strftime(TS_FORMAT))
signed_tx_json = signed_tx.json()
fee = {
'assetId': signed_tx_json['operations'][0][1]['fee']['asset_id'],
'amount': signed_tx_json['operations'][0][1]['fee']['amount']
}
cancel_all_msg = {
'transactionType': 'CancelAll',
'transactionId': signed_tx.id,
'refBlockNum': signed_tx_json['ref_block_num'],
'refBlockPrefix': signed_tx_json['ref_block_prefix'],
            'txExpiration': int(tx_expiration_utc_timestamp),
'fee': fee,
'seller': self.account,
'sellAssetId': pair.quote['id'],
'recvAssetId': pair.base['id'],
'signature': signed_tx_json['signatures'][0]
}
print(cancel_all_msg)
return cancel_all_msg
def _handle_response(self, response):
# Return the json object if there is no error
if not str(response.status_code).startswith('2'):
raise CybexAPIException(response)
try:
data = response.json()
if 'Status' in data and data['Status'] == 'Failed':
msg = 'Unknown error'
if 'Message' in data:
msg = data['Message']
                raise CybexSignerException(msg)
return data
except ValueError:
raise CybexSignerException('Invalid Response: %s' % response.text)
# order_msg = signer.prepare_order_message(asset_pair='ETH/USDT', price=80, quantity=0.1, side='buy')
class Cybex:
"""Cybex Restful API implementation
"""
prod_api_endpoint_root = "https://api.cybex.io/v1"
uat_api_endpoint_root = "https://apitest.cybex.io/v1"
prod_chain_endpoint = "https://hongkong.cybex.io/"
def __init__(self, accountName, key, account=None, env='prod', timeout=None):
        self.accountName = accountName
        # Use the explicitly provided account id if given, otherwise look it up on-chain by name
        self.account = account if account is not None else self._find_account(accountName)
        if env == 'prod':
            self.api_root = self.prod_api_endpoint_root
        elif env == 'uat':
            self.api_root = self.uat_api_endpoint_root
        else:
            raise ValueError('Unsupported env: %s' % env)
self.timeout = timeout
self.markets = []
# Prepare HTTPS session
self.session = requests.Session()
self.session.headers.update({'content-type': 'application/json', 'accept': 'application/json'})
self._load()
        self.signer = Signer(self.account, key, self.refData)
def _load(self):
url = "%s/refData" % self.api_root
self.refData = self._handle_response(requests.get(url))
    def load_markets(self, reload=False):
        if reload:
            self._load()
        self.markets = self.refData.get('availableAssetPairs', [])
        return self.markets
@property
def assetPairs(self):
return [mkt['name'] for mkt in self.markets]
    def market(self, assetPair):
        for mkt in self.markets:
            if mkt.get('name') == assetPair:
                return mkt
        return None
def _find_account(self, accountName):
url = self.prod_chain_endpoint+"rpc"
data = {"method": "call", "params": [0, "lookup_accounts",[accountName, 50]], "id": 1}
res = requests.get(url, json=data)
result = res.json()
if 'result' in result:
for acc in result['result']:
if accountName == acc[0]:
return acc[1]
def fetch_ticker(self, assetPair):
url = self.prod_chain_endpoint
        if '/' in assetPair:
            # Non-CYB symbols without a namespace need the 'JADE.' prefix on the chain
            newparams = []
            for asset in assetPair.split('/'):
                if asset != 'CYB' and '.' not in asset:
                    asset = 'JADE.' + asset
                newparams.append(asset)
        else:
            newparams = [assetPair]
data = {"jsonrpc": "2.0", "method": "get_ticker", "params": newparams, "id": 1}
return self._handle_response(requests.get(url, json=data))
def fetch_order_book(self, assetPair, limit=3):
url = '%s/orderBook' % self.api_root
params = {'assetPair': assetPair, 'limit': limit}
return self._handle_response(requests.get(url, params=params))
def fetch_best_price(self, assetPair):
result = self.fetch_order_book(assetPair)
return float(result['bids'][0][0]), float(result['asks'][0][0])
def _send_transaction(self, data):
url = "%s/transaction" % self.api_root
headers = {'Content-type': 'application/json'}
return self._handle_response(requests.post(url, json=data, headers=headers))
def _handle_response(self, response):
# Return the json object if there is no error
if not str(response.status_code).startswith('2'):
raise CybexAPIException(response)
try:
data = response.json()
if 'Status' in data and data['Status'] == 'Failed':
msg = 'Unknown error.'
if 'Message' in data:
msg = data['Message']
if 'rejectReason' in data:
msg = data['rejectReason']
raise CybexRequestException(msg)
            if 'jsonrpc' in data and 'result' in data:
return data['result']
return data
except ValueError:
raise CybexRequestException('Invalid Response: %s' % response.text)
def create_order(self, assetPair, side, quantity, price):
order_msg = self.signer.prepare_order_message(assetPair, side, quantity, price)
print('order_msg', order_msg)
trx_id = order_msg['transactionId']
result = self._send_transaction(order_msg)
return trx_id, result
def create_limit_buy_order(self, assetPair, quantity, price):
return self.create_order(assetPair, 'buy', quantity, price)
def create_limit_sell_order(self, assetPair, quantity, price):
return self.create_order(assetPair, 'sell', quantity, price)
def create_market_buy_order(self, assetPair, quantity):
bid, ask = self.fetch_best_price(assetPair)
# put some buffer in price
return self.create_order(assetPair, 'buy', quantity, ask * 1.01)
def create_market_sell_order(self, assetPair, quantity):
bid, ask = self.fetch_best_price(assetPair)
# put some buffer in price
return self.create_order(assetPair, 'sell', quantity, bid * 0.99)
def fetch_balance(self):
url = "%s/position" % self.api_root
payload = {'accountName': self.accountName}
return self._handle_response(requests.get(url, params=payload))
def cancel_order(self, id):
cancel_msg = self.signer.prepare_cancel_message(id)
cancel_result = self._send_transaction(cancel_msg)
return cancel_result
def cancel_all(self, assetPair):
cancel_all_msg = self.signer.prepare_cancel_all_message(assetPair)
cancel_all_result = self._send_transaction(cancel_all_msg)
return cancel_all_result
def fetch_order(self, id):
url = "%s/order" % self.api_root
payload = {'accountName': self.accountName, 'transactionId': id}
return self._handle_response(requests.get(url, params=payload))
def fetch_orders(self, assetPair, reverse=True):
url = "%s/order" % self.api_root
payload = {'accountName': self.accountName, 'assetPair': assetPair, "reverse": int(reverse)}
return self._handle_response(requests.get(url, params=payload))
def fetch_open_orders(self, since=None, reverse=True):
url = "%s/order" % self.api_root
payload = {'accountName': self.accountName, 'orderStatus': 'OPEN', "reverse": int(reverse)}
if since:
payload['startTime'] = time_string(since)
return self._handle_response(requests.get(url, params=payload))
def fetch_closed_orders(self, since=None, reverse=True):
url = "%s/order" % self.api_root
payload = {'accountName': self.accountName, 'orderStatus': 'CANCELED, REJECTED', "reverse": int(reverse)}
if since:
payload['startTime'] = time_string(since)
return self._handle_response(requests.get(url, params=payload))
def fetch_my_trades(self, assetPair=None, since=None, reverse=True):
url = "%s/trade" % self.api_root
payload = {'accountName': self.accountName, "reverse": int(reverse)}
if assetPair:
payload['assetPair'] = assetPair
if since:
payload['startTime'] = time_string(since)
return self._handle_response(requests.get(url, params=payload))
def fetch_trades(self, assetPair, limit=20, reverse=True):
url = "%s/trade" % self.api_root
payload = {'assetPair': assetPair, "limit": limit, "reverse": int(reverse)}
return self._handle_response(requests.get(url, params=payload))
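# Illustrative usage sketch (added; not part of the original module). The
# account name, key and asset pair below are placeholders, not real credentials:
#
#   cybex = Cybex(accountName='your-account-name', key='your-private-key', env='uat')
#   print(cybex.fetch_ticker('ETH/USDT'))
#   print(cybex.fetch_order_book('ETH/USDT', limit=3))
#   trx_id, result = cybex.create_limit_buy_order('ETH/USDT', quantity=0.1, price=80.0)
#   print(cybex.fetch_order(trx_id))
#   cybex.cancel_order(trx_id)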
|
PypiClean
|
/jupyterlab_blockly-0.3.0a1.tar.gz/jupyterlab_blockly-0.3.0a1/packages/blockly-extension/node_modules/minimatch/dist/mjs/brace-expressions.js
|
const posixClasses = {
'[:alnum:]': ['\\p{L}\\p{Nl}\\p{Nd}', true],
'[:alpha:]': ['\\p{L}\\p{Nl}', true],
'[:ascii:]': ['\\x' + '00-\\x' + '7f', false],
'[:blank:]': ['\\p{Zs}\\t', true],
'[:cntrl:]': ['\\p{Cc}', true],
'[:digit:]': ['\\p{Nd}', true],
'[:graph:]': ['\\p{Z}\\p{C}', true, true],
'[:lower:]': ['\\p{Ll}', true],
'[:print:]': ['\\p{C}', true],
'[:punct:]': ['\\p{P}', true],
'[:space:]': ['\\p{Z}\\t\\r\\n\\v\\f', true],
'[:upper:]': ['\\p{Lu}', true],
'[:word:]': ['\\p{L}\\p{Nl}\\p{Nd}\\p{Pc}', true],
'[:xdigit:]': ['A-Fa-f0-9', false],
};
// only need to escape a few things inside of brace expressions
// escapes: [ \ ] -
const braceEscape = (s) => s.replace(/[[\]\\-]/g, '\\$&');
// escape all regexp magic characters
const regexpEscape = (s) => s.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, '\\$&');
// everything has already been escaped, we just have to join
const rangesToString = (ranges) => ranges.join('');
// takes a glob string at a posix brace expression, and returns
// an equivalent regular expression source, and boolean indicating
// whether the /u flag needs to be applied, and the number of chars
// consumed to parse the character class.
// This also removes out-of-order ranges, and returns ($.) if the
// entire class is no good.
export const parseClass = (glob, position) => {
const pos = position;
/* c8 ignore start */
if (glob.charAt(pos) !== '[') {
throw new Error('not in a brace expression');
}
/* c8 ignore stop */
const ranges = [];
const negs = [];
let i = pos + 1;
let sawStart = false;
let uflag = false;
let escaping = false;
let negate = false;
let endPos = pos;
let rangeStart = '';
WHILE: while (i < glob.length) {
const c = glob.charAt(i);
if ((c === '!' || c === '^') && i === pos + 1) {
negate = true;
i++;
continue;
}
if (c === ']' && sawStart && !escaping) {
endPos = i + 1;
break;
}
sawStart = true;
if (c === '\\') {
if (!escaping) {
escaping = true;
i++;
continue;
}
// escaped \ char, fall through and treat like normal char
}
if (c === '[' && !escaping) {
// either a posix class, a collation equivalent, or just a [
for (const [cls, [unip, u, neg]] of Object.entries(posixClasses)) {
if (glob.startsWith(cls, i)) {
// invalid, [a-[] is fine, but not [a-[:alpha]]
if (rangeStart) {
return ['$.', false, glob.length - pos, true];
}
i += cls.length;
if (neg)
negs.push(unip);
else
ranges.push(unip);
uflag = uflag || u;
continue WHILE;
}
}
}
// now it's just a normal character, effectively
escaping = false;
if (rangeStart) {
// throw this range away if it's not valid, but others
// can still match.
if (c > rangeStart) {
ranges.push(braceEscape(rangeStart) + '-' + braceEscape(c));
}
else if (c === rangeStart) {
ranges.push(braceEscape(c));
}
rangeStart = '';
i++;
continue;
}
// now might be the start of a range.
// can be either c-d or c-] or c<more...>] or c] at this point
if (glob.startsWith('-]', i + 1)) {
ranges.push(braceEscape(c + '-'));
i += 2;
continue;
}
if (glob.startsWith('-', i + 1)) {
rangeStart = c;
i += 2;
continue;
}
// not the start of a range, just a single character
ranges.push(braceEscape(c));
i++;
}
if (endPos < i) {
// didn't see the end of the class, not a valid class,
// but might still be valid as a literal match.
return ['', false, 0, false];
}
// if we got no ranges and no negates, then we have a range that
// cannot possibly match anything, and that poisons the whole glob
if (!ranges.length && !negs.length) {
return ['$.', false, glob.length - pos, true];
}
// if we got one positive range, and it's a single character, then that's
// not actually a magic pattern, it's just that one literal character.
// we should not treat that as "magic", we should just return the literal
// character. [_] is a perfectly valid way to escape glob magic chars.
if (negs.length === 0 &&
ranges.length === 1 &&
/^\\?.$/.test(ranges[0]) &&
!negate) {
const r = ranges[0].length === 2 ? ranges[0].slice(-1) : ranges[0];
return [regexpEscape(r), false, endPos - pos, false];
}
const sranges = '[' + (negate ? '^' : '') + rangesToString(ranges) + ']';
const snegs = '[' + (negate ? '' : '^') + rangesToString(negs) + ']';
const comb = ranges.length && negs.length
? '(' + sranges + '|' + snegs + ')'
: ranges.length
? sranges
: snegs;
return [comb, uflag, endPos - pos, true];
};
//# sourceMappingURL=brace-expressions.js.map
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/models/device_category_collection_response.py
|
from __future__ import annotations
from kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from . import base_collection_pagination_count_response, device_category
from . import base_collection_pagination_count_response
class DeviceCategoryCollectionResponse(base_collection_pagination_count_response.BaseCollectionPaginationCountResponse):
def __init__(self,) -> None:
"""
Instantiates a new DeviceCategoryCollectionResponse and sets the default values.
"""
super().__init__()
# The value property
self._value: Optional[List[device_category.DeviceCategory]] = None
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> DeviceCategoryCollectionResponse:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parseNode: The parse node to use to read the discriminator value and create the object
Returns: DeviceCategoryCollectionResponse
"""
if parse_node is None:
raise Exception("parse_node cannot be undefined")
return DeviceCategoryCollectionResponse()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
from . import base_collection_pagination_count_response, device_category
fields: Dict[str, Callable[[Any], None]] = {
"value": lambda n : setattr(self, 'value', n.get_collection_of_object_values(device_category.DeviceCategory)),
}
super_fields = super().get_field_deserializers()
fields.update(super_fields)
return fields
def serialize(self,writer: SerializationWriter) -> None:
"""
        Serializes information about the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if writer is None:
raise Exception("writer cannot be undefined")
super().serialize(writer)
writer.write_collection_of_object_values("value", self.value)
@property
def value(self,) -> Optional[List[device_category.DeviceCategory]]:
"""
Gets the value property value. The value property
Returns: Optional[List[device_category.DeviceCategory]]
"""
return self._value
@value.setter
def value(self,value: Optional[List[device_category.DeviceCategory]] = None) -> None:
"""
Sets the value property value. The value property
Args:
value: Value to set for the value property.
"""
self._value = value
|
PypiClean
|
/openepda-0.1.20.tar.gz/openepda-0.1.20/docs/source/data/openepda_data_format.rst
|
.. _openepda_data_format:
====================
openEPDA data format
====================
About openEPDA data format
==========================
Summary
-------
With the development of the fab-less approach to integrated photonics, there is
an increasing demand for infrastructure to handle measurement, analysis
and simulation data. To facilitate data exchange between parties, we fill a gap
in the available data file formats and describe an open standard file format
that is human readable and at the same time allows convenient storage and
manipulation of heterogeneous measurement data.
Motivation
----------
In integrated electronics and photonics, data in various formats are generated
at multiple stages of the fabrication chain:
design, simulations, fabrication, testing, and device characterization.
For example, testing is performed at different stages of fabrication,
from on-wafer testing during and after fabrication to module-level
testing of the packaged devices. The data generated during these measurements
are heterogeneous and serve different purposes: pass-fail procedures,
process control, and device model development and calibration. This involves
various parties, such as foundries, measurement labs, designers, and software
companies, which use different tools to generate and process the test data.
Data types
==========
The generated data comes from different sources and is heterogeneous.
The main data is obtained from the measurement equipment directly when
the observation is performed. This data is usually numeric (scalars or arrays).
The identifiers of the wafer, die and circuit under test represent metadata
for the given observation. Metadata may also include the equipment used,
the equipment settings, the date of calibration, ambient conditions, etc.
The metadata can be of various simple (numeric, textual) and structured
types (arrays, maps). An overview of the data types is presented in the
table below.
+-----------+-----------------------------+---------------------------------+------------------------------+
| Data type | Description | Examples | Remarks |
+===========+=============================+=================================+==============================+
| Number | Any numeric value | 1 | Representation is same |
| | | 2.3 | as in section 10.2.1.4 |
| | | .inf | of [1] |
| | | 1.9e-3 | |
+-----------+-----------------------------+---------------------------------+------------------------------+
| | A list of characters | 'spectrum analyzer' | |
| String | | 'SPM18-3' | |
+-----------+-----------------------------+---------------------------------+------------------------------+
| Array | A sorted list of numeric | [1, 2, 3, 4] | Values may have mixed types, |
| | or string values | ['voltage', 'current', 'power'] | which is discouraged |
+-----------+-----------------------------+---------------------------------+------------------------------+
| Map | Mapping of a set of values | {'wafer': 'SPM18-3', | Also called a named array, |
| | to another set of values | 'die': '38X23', | a look-up table, or a |
| | in the ``key: value`` form | 'design': 'SP00-38'} | dictionary |
+-----------+-----------------------------+---------------------------------+------------------------------+
To facilitate the exchange of data between these parties, we have developed
a standard data file format which can store measurement data and metadata
in a human-readable way. The format is sufficiently flexible to include
arbitrary structured data, including arrays and maps. The generated files are
straightforward to import into any software or analysis tool, for example
MATLAB, Python, and Excel.
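As a purely illustrative sketch, the snippet below writes a record mixing the
data types from the table above to a YAML file from Python. The field names and
layout are made up for this example and do not follow the normative openEPDA
layout, which is defined in the format specifications listed under
Specification below::

    import yaml  # PyYAML

    record = {
        "wafer": "SPM18-3",                    # string metadata
        "die": "38X23",                        # string metadata
        "ambient_temperature": 21.5,           # number
        "settings": {"laser_power_mW": 2.0},   # map
        "wavelength_nm": [1550.0, 1550.1],     # array of numbers
        "power_dBm": [-3.2, -3.4],             # array of numbers
    }
    with open("measurement.yaml", "w") as f:
        yaml.safe_dump(record, f, default_flow_style=False)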
License
=======
openEPDA data formats are available under CC BY-SA 4.0 license.
This is Creative Commons Attribution-ShareAlike 4.0 International
(CC BY-SA 4.0). See full license text here:
`CC BY-SA 4.0 <https://creativecommons.org/licenses/by-sa/4.0/legalcode>`_.
More details on openEPDA view on standards licensing are available on
:ref:`Licensing policy <licensing_policy>` page.
Specification
=============
The data format defines how the information is written into a file. Besides
this, the YAML section contains reserved keys, which start with the underscore
symbol. The list of reserved keys is given for each particular format version.
.. toctree::
:maxdepth: 1
0.2/format_specification
0.1/format_specification
|
PypiClean
|
/NeuralPlayground-0.0.7.tar.gz/NeuralPlayground-0.0.7/neuralplayground/experiments/sargolini_2006_data.py
|
import glob
import os.path
import numpy as np
import scipy.io as sio
import neuralplayground
from neuralplayground.datasets import fetch_data_path
from neuralplayground.experiments import Experiment, Hafting2008Data
from neuralplayground.utils import clean_data
class SargoliniDataTrajectory(Experiment):
"""Data class for sargolini et al. 2006. https://www.science.org/doi/10.1126/science.1125572
The data can be obtained from https://archive.norstore.no/pages/public/datasetDetail.jsf?id=8F6BE356-3277-475C-87B1-C7A977632DA7
    This class only considers the animal trajectories pre-processed by the authors.
"""
def __init__(
self,
experiment_name: str = "Sargolini_2006_Data",
data_path: str = None,
data_url: str = None,
paper_url: str = None,
**kwargs,
):
"""SargoliniData Class Init
Parameters
----------
experiment_name: str
string to identify object in case of multiple instances
data_path: str
if None, fetch the data from the NeuralPlayground data repository,
else load data from given path
"""
if data_url is None:
data_url = "https://archive.norstore.no/pages/public/datasetDetail.jsf?id=8F6BE356-3277-475C-87B1-C7A977632DA7"
if paper_url is None:
paper_url = "https://www.science.org/doi/10.1126/science.1125572"
super().__init__(experiment_name=experiment_name, data_url=data_url, paper_url=paper_url)
self.recording_list = []
if data_path is None:
# Set data_path to the data directory within the package
self.data_path = fetch_data_path("sargolini_2006")
else:
self.data_path = data_path
# Sort the data in data_path
(
self.arena_limits,
self.position,
self.head_direction,
self.time_span,
) = self._get_sargolini_data()
def _get_sargolini_data(self, tolerance: float = 1e-10):
"""Load and concatenate animal trajectories from data pre-processed by the authors
Parameters
----------
tolerance: float
Small constant to avoid dividing by zero when estimating head direction
Returns
-------
arena_limits: ndarray (2, 2)
first row x limits of the arena, second row y limits of the arena, in cm
position: ndarray (n, 2)
first column is x pos in cm, second column is y pos in cm, n is the number of sampled positions
head_direction: ndarray (n-1, 2)
directional vector with estimated head direction from position derivative
time_span: ndarray (n, )
time stamp per measurement assuming 50Hz of sampling rate
"""
# Arena limits in cm, sampling rate in Hz, both from the original experiment
arena_limits = np.array([[-50, 50], [-50, 50]])
self.sampling_rate = 50
# Listing files with trajectories
filenames_x = os.path.join(self.data_path, "sargolini_x_pos_")
filenames_y = os.path.join(self.data_path, "sargolini_y_pos_")
        # Filling arrays from files
x_position = np.array([])
y_position = np.array([])
for i in range(61):
aux_x = np.load(filenames_x + str(i) + ".npy")
aux_y = np.load(filenames_y + str(i) + ".npy")
x_position = np.concatenate([x_position, aux_x])
y_position = np.concatenate([y_position, aux_y])
position = np.stack([x_position, y_position], axis=1) * 100 # Convert to cm, originally in meters
head_direction = np.diff(position, axis=0) # Head direction from derivative of position
head_direction = head_direction / np.sqrt(np.sum(head_direction**2, axis=1) + tolerance)[..., np.newaxis]
time_span = np.arange(head_direction.shape[0]) * (1 / self.sampling_rate)
return arena_limits, position, head_direction, time_span
class Sargolini2006Data(Hafting2008Data):
"""Data class for sargolini et al. 2006. https://www.science.org/doi/10.1126/science.1125572
The data can be obtained from https://archive.norstore.no/pages/public/datasetDetail.jsf?id=8F6BE356-3277-475C-87B1-C7A977632DA7
    This class considers raw animal trajectories and neural recordings.
    Refer to Hafting2008Data for more information on the available methods and attributes.
"""
def __init__(
self,
data_path: str = None,
recording_index: int = None,
experiment_name: str = "FullSargoliniData",
verbose: bool = False,
data_url: str = None,
paper_url: str = None,
):
"""Sargolini2006Data init, just initializing parent class Hafting2008Data
Parameters
----------
data_path: str
if None, fetch the data from the NeuralPlayground data repository,
else load data from given path
recording_index: int
if None, load data from default recording index
experiment_name: str
string to identify object in case of multiple instances
verbose:
if True, it will print original readme and data structure when initializing this object
data_url: str
URL to the data used in the experiment, make sure it is publicly available for usage and download
paper_url: str
URL to the paper describing the experiment
"""
if data_url is None:
data_url = "https://archive.norstore.no/pages/public/datasetDetail.jsf?id=8F6BE356-3277-475C-87B1-C7A977632DA7"
if paper_url is None:
paper_url = "https://www.science.org/doi/10.1126/science.1125572"
super().__init__(
data_path=data_path,
recording_index=recording_index,
experiment_name=experiment_name,
verbose=verbose,
data_url=data_url,
paper_url=paper_url,
)
def _find_data_path(self, data_path: str):
"""Fetch data from NeuralPlayground data repository
if no data path is supplied by the user"""
if data_path is None:
self.data_path = fetch_data_path("sargolini_2006") + "raw_data_sample/"
else:
self.data_path = data_path + "raw_data_sample/"
def _load_data(self):
"""Parse data according to specific data format
if you are a user check the notebook examples"""
self.best_recording_index = 0 # Nice session recording as default
# Arena limits from the experimental setting, first row x limits, second row y limits, in cm
self.arena_limits = np.array([[-50.0, 50.0], [-50.0, 50.0]])
data_path_list = glob.glob(os.path.join(self.data_path, "*.mat"))
mice_ids = np.unique([os.path.basename(dp)[:5] for dp in data_path_list])
# Initialize data dictionary, later handled by this object itself (so don't worry about this)
self.data_per_animal = {}
for m_id in mice_ids:
m_paths_list = glob.glob(self.data_path + m_id + "*.mat")
sessions = np.unique([dp.split("/")[-1].split("-")[1][:8] for dp in m_paths_list]).astype(str)
self.data_per_animal[m_id] = {}
for sess in sessions:
s_paths_list = glob.glob(self.data_path + m_id + "-" + sess + "*.mat")
cell_ids = np.unique([dp.split("/")[-1].split(".")[-2][-4:] for dp in s_paths_list]).astype(str)
self.data_per_animal[m_id][sess] = {}
for cell_id in cell_ids:
if cell_id == "_POS":
session_info = "position"
elif cell_id in ["_EEG", "_EGF"]:
session_info = cell_id[1:]
else:
session_info = cell_id
r_path = glob.glob(self.data_path + m_id + "-" + sess + "*" + cell_id + "*.mat")
# Interpolate to replace NaNs and stuff
cleaned_data = clean_data(sio.loadmat(r_path[0]))
if cell_id != "_POS" and cell_id not in ["_EEG", "_EGF"]:
try:
self.data_per_animal[m_id][sess][session_info] = cleaned_data["cellTS"]
except Exception:
pass
else:
self.data_per_animal[m_id][sess][session_info] = cleaned_data
def get_tetrode_data(self, session_data: str = None, tetrode_id: str = None):
"""Return time stamp, position and spikes for a given session and tetrode
Parameters
----------
session_data: str
if None, the session used corresponds to the default recording index
tetrode_id:
tetrode id in the corresponding session
Returns
-------
time_array: ndarray (n_samples,)
array with the timestamps in seconds per position of the given session
test_spikes: ndarray (n_spikes,)
spike times in seconds of the given session
x: ndarray (n_samples,)
x position throughout recording of the given session
y: ndarray (n_samples,)
y position throughout recording of the given session
"""
if session_data is None:
session_data, rev_vars, rat_info = self.get_recording_data(recording_index=0)
tetrode_id = self._find_tetrode(rev_vars)
position_data = session_data["position"]
x1, y1 = position_data["posx"][:, 0], position_data["posy"][:, 0]
x2, y2 = x1, y1
# Selecting positional data
x = np.clip(x2, a_min=self.arena_limits[0, 0], a_max=self.arena_limits[0, 1])
y = np.clip(y2, a_min=self.arena_limits[1, 0], a_max=self.arena_limits[1, 1])
time_array = position_data["post"][:]
tetrode_data = session_data[tetrode_id]
test_spikes = tetrode_data[:, 0]
time_array = time_array[:, 0]
return time_array, test_spikes, x, y
if __name__ == "__main__":
# print("initializing hafting")
# data = FullHaftingData(verbose=True)
# print("plotting_tragectory")
# data.plot_trajectory(2)
# print("plotting_recording")
# data.plot_recording_tetr(2)
# plt.show()
print("initializing sargolini")
# data = FullSargoliniData(verbose=True)
# print("plotting_tragectory")
# data.plot_trajectory(2)
# print("plotting_recording")
# data.plot_recording_tetr(2)
# plt.show()
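    # Illustrative usage sketch (added; not part of the original file). Assuming
    # the pre-processed trajectories can be fetched to the default data path:
    # traj = SargoliniDataTrajectory()
    # print(traj.arena_limits)          # [[-50, 50], [-50, 50]], in cm
    # print(traj.position.shape)        # (n, 2) positions in cm
    # print(traj.head_direction.shape)  # (n - 1, 2) unit direction vectors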
|
PypiClean
|
/eurotools-0.5.4.tar.gz/eurotools-0.5.4/eurocharts/static/lib/js/js/modules/accessibility.js
|
(function(k){"object"===typeof module&&module.exports?module.exports=k:k(Highcharts)})(function(k){(function(e){function k(a){for(var b=a.childNodes.length;b--;)a.appendChild(a.childNodes[b])}function t(a){var b;a&&a.onclick&&(b=m.createEvent("Events"),b.initEvent("click",!0,!1),a.onclick(b))}var w=e.win,m=w.document,h=e.each,y=e.erase,v=e.addEvent,z=e.removeEvent,A=e.fireEvent,B=e.dateFormat,u=e.merge,p={"default":["series","data point","data points"],line:["line","data point","data points"],spline:["line",
"data point","data points"],area:["line","data point","data points"],areaspline:["line","data point","data points"],pie:["pie","slice","slices"],column:["column series","column","columns"],bar:["bar series","bar","bars"],scatter:["scatter series","data point","data points"],boxplot:["boxplot series","box","boxes"],arearange:["arearange series","data point","data points"],areasplinerange:["areasplinerange series","data point","data points"],bubble:["bubble series","bubble","bubbles"],columnrange:["columnrange series",
"column","columns"],errorbar:["errorbar series","errorbar","errorbars"],funnel:["funnel","data point","data points"],pyramid:["pyramid","data point","data points"],waterfall:["waterfall series","column","columns"],map:["map","area","areas"],mapline:["line","data point","data points"],mappoint:["point series","data point","data points"],mapbubble:["bubble series","bubble","bubbles"]},C={boxplot:" Box plot charts are typically used to display groups of statistical data. Each data point in the chart can have up to 5 values: minimum, lower quartile, median, upper quartile and maximum. ",
arearange:" Arearange charts are line charts displaying a range between a lower and higher value for each point. ",areasplinerange:" These charts are line charts displaying a range between a lower and higher value for each point. ",bubble:" Bubble charts are scatter charts where each data point also has a size value. ",columnrange:" Columnrange charts are column charts displaying a range between a lower and higher value for each point. ",errorbar:" Errorbar series are used to display the variability of the data. ",
funnel:" Funnel charts are used to display reduction of data in stages. ",pyramid:" Pyramid charts consist of a single pyramid with item heights corresponding to each point value. ",waterfall:" A waterfall chart is a column chart where each column contributes towards a total end value. "},D="name id category x value y".split(" "),x="z open high q3 median q1 low close".split(" ");e.setOptions({accessibility:{enabled:!0,pointDescriptionThreshold:30,keyboardNavigation:{enabled:!0}}});e.wrap(e.Series.prototype,
"render",function(a){a.apply(this,Array.prototype.slice.call(arguments,1));this.chart.options.accessibility.enabled&&this.setA11yDescription()});e.Series.prototype.setA11yDescription=function(){var a=this.chart.options.accessibility,b=this.points&&this.points[0].graphic&&this.points[0].graphic.element,d=b&&b.parentNode||this.graph&&this.graph.element||this.group&&this.group.element;d&&(d.lastChild===b&&k(d),this.points&&(this.points.length<a.pointDescriptionThreshold||!1===a.pointDescriptionThreshold)&&
h(this.points,function(c){c.graphic&&(c.graphic.element.setAttribute("role","img"),c.graphic.element.setAttribute("tabindex","-1"),c.graphic.element.setAttribute("aria-label",a.pointDescriptionFormatter&&a.pointDescriptionFormatter(c)||c.buildPointInfoString()))}),1<this.chart.series.length||a.describeSingleSeries)&&(d.setAttribute("role","region"),d.setAttribute("tabindex","-1"),d.setAttribute("aria-label",a.seriesDescriptionFormatter&&a.seriesDescriptionFormatter(this)||this.buildSeriesInfoString()))};
e.Series.prototype.buildSeriesInfoString=function(){var a=p[this.type]||p["default"],b=this.description||this.options.description;return(this.name?this.name+", ":"")+(1===this.chart.types.length?a[0]:"series")+" "+(this.index+1)+" of "+this.chart.series.length+(1===this.chart.types.length?" with ":". "+a[0]+" with ")+(this.points.length+" "+(1===this.points.length?a[1]:a[2]))+(b?". "+b:"")+(1<this.chart.yAxis.length&&this.yAxis?". Y axis, "+this.yAxis.getDescription():"")+(1<this.chart.xAxis.length&&
this.xAxis?". X axis, "+this.xAxis.getDescription():"")};e.Point.prototype.buildPointInfoString=function(){var a=this,b=a.series,d=b.chart.options.accessibility,c="",f=!1,g=b.xAxis&&b.xAxis.isDatetimeAxis,b=g&&B(d.pointDateFormatter&&d.pointDateFormatter(a)||d.pointDateFormat||e.Tooltip.prototype.getXDateFormat(a,b.chart.options.tooltip,b.xAxis),a.x);h(x,function(c){void 0!==a[c]&&(f=!0)});f?(g&&(c=b),h(D.concat(x),function(b){void 0===a[b]||g&&"x"===b||(c+=(c?". ":"")+b+", "+this[b])})):c=(this.name||
b||this.category||this.id||"x, "+this.x)+", "+(void 0!==this.value?this.value:this.y);return this.index+1+". "+c+"."+(this.description?" "+this.description:"")};e.Axis.prototype.getDescription=function(){return this.userOptions&&this.userOptions.description||this.axisTitle&&this.axisTitle.textStr||this.options.id||this.categories&&"categories"||"values"};e.Axis.prototype.panStep=function(a,b){var d=b||3,c=this.getExtremes(),f=(c.max-c.min)/d*a,d=c.max+f,f=c.min+f,g=d-f;0>a&&f<c.dataMin?(f=c.dataMin,
d=f+g):0<a&&d>c.dataMax&&(d=c.dataMax,f=d-g);this.setExtremes(f,d)};e.wrap(e.Series.prototype,"init",function(a){a.apply(this,Array.prototype.slice.call(arguments,1));var b=this.chart;b.options.accessibility.enabled&&(b.types=b.types||[],0>b.types.indexOf(this.type)&&b.types.push(this.type),v(this,"remove",function(){var a=this,c=!1;h(b.series,function(f){f!==a&&0>b.types.indexOf(a.type)&&(c=!0)});c||y(b.types,a.type)}))});e.Chart.prototype.getTypeDescription=function(){var a=this.types&&this.types[0],
b=this.series[0]&&this.series[0].mapTitle;if(a){if("map"===a)return b?"Map of "+b:"Map of unspecified region.";if(1<this.types.length)return"Combination chart.";if(-1<["spline","area","areaspline"].indexOf(a))return"Line chart."}else return"Empty chart.";return a+" chart."+(C[a]||"")};e.Chart.prototype.getAxesDescription=function(){var a=this.xAxis.length,b=this.yAxis.length,d={},c;if(a)if(d.xAxis="The chart has "+a+(1<a?" X axes":" X axis")+" displaying ",2>a)d.xAxis+=this.xAxis[0].getDescription()+
".";else{for(c=0;c<a-1;++c)d.xAxis+=(c?", ":"")+this.xAxis[c].getDescription();d.xAxis+=" and "+this.xAxis[c].getDescription()+"."}if(b)if(d.yAxis="The chart has "+b+(1<b?" Y axes":" Y axis")+" displaying ",2>b)d.yAxis+=this.yAxis[0].getDescription()+".";else{for(c=0;c<b-1;++c)d.yAxis+=(c?", ":"")+this.yAxis[c].getDescription();d.yAxis+=" and "+this.yAxis[c].getDescription()+"."}return d};e.Chart.prototype.addAccessibleContextMenuAttribs=function(){var a=this.exportDivElements;a&&(h(a,function(a){"DIV"!==
a.tagName||a.children&&a.children.length||(a.setAttribute("role","menuitem"),a.setAttribute("tabindex",-1))}),a[0].parentNode.setAttribute("role","menu"),a[0].parentNode.setAttribute("aria-label","Chart export"))};e.Point.prototype.highlight=function(){var a=this.series.chart;this.graphic&&this.graphic.element.focus&&this.graphic.element.focus();this.isNull?a.tooltip.hide(0):(this.onMouseOver(),a.tooltip.refresh(a.tooltip.shared?[this]:this));a.highlightedPoint=this;return this};e.Chart.prototype.highlightAdjacentPoint=
function(a){var b=this.series,d=this.highlightedPoint;if(!b[0]||!b[0].points)return!1;if(!d)return b[0].points[0].highlight();b=b[d.series.index+(a?1:-1)];d=a?d.series.points[d.index+1]||b&&b.points[0]:d.series.points[d.index-1]||b&&b.points[b.points.length-1];return void 0===d?!1:d.isNull&&this.options.accessibility.keyboardNavigation&&this.options.accessibility.keyboardNavigation.skipNullPoints?(this.highlightedPoint=d,this.highlightAdjacentPoint(a)):d.highlight()};e.Chart.prototype.showExportMenu=
function(){this.exportSVGElements&&this.exportSVGElements[0]&&(this.exportSVGElements[0].element.onclick(),this.highlightExportItem(0))};e.Chart.prototype.highlightExportItem=function(a){var b=this.exportDivElements&&this.exportDivElements[a],d=this.exportDivElements&&this.exportDivElements[this.highlightedExportItem];if(b&&"DIV"===b.tagName&&(!b.children||!b.children.length)){b.focus&&b.focus();if(d&&d.onmouseout)d.onmouseout();if(b.onmouseover)b.onmouseover();this.highlightedExportItem=a;return!0}};
e.Chart.prototype.highlightRangeSelectorButton=function(a){var b=this.rangeSelector.buttons;b[this.highlightedRangeSelectorItemIx]&&b[this.highlightedRangeSelectorItemIx].setState(this.oldRangeSelectorItemState||0);this.highlightedRangeSelectorItemIx=a;return b[a]?(b[a].element.focus&&b[a].element.focus(),this.oldRangeSelectorItemState=b[a].state,b[a].setState(2),!0):!1};e.Chart.prototype.hideExportMenu=function(){var a=this.exportDivElements;if(a){h(a,function(a){A(a,"mouseleave")});if(a[this.highlightedExportItem]&&
a[this.highlightedExportItem].onmouseout)a[this.highlightedExportItem].onmouseout();this.highlightedExportItem=0;this.renderTo.focus()}};e.Chart.prototype.addKeyboardNavEvents=function(){function a(c){this.keyCodeMap=c.keyCodeMap;this.move=c.move;this.validate=c.validate;this.init=c.init;this.transformTabs=!1!==c.transformTabs}function b(b,g){return new a(u({keyCodeMap:b,move:function(a){c.keyboardNavigationModuleIndex+=a;var b=c.keyboardNavigationModules[c.keyboardNavigationModuleIndex];if(b){if(b.validate&&
!b.validate())return this.move(a);if(b.init)return b.init(a),!0}c.keyboardNavigationModuleIndex=0;c.slipNextTab=!0;return!1}},g))}function d(a){a=a||w.event;var b=c.keyboardNavigationModules[c.keyboardNavigationModuleIndex];9===(a.which||a.keyCode)&&c.slipNextTab?c.slipNextTab=!1:(c.slipNextTab=!1,b&&b.run(a)&&a.preventDefault())}var c=this;a.prototype={run:function(c){var a=this,b=c.which||c.keyCode,d=!1,b=this.transformTabs&&9===b?c.shiftKey?37:39:b;h(this.keyCodeMap,function(e){-1<e[0].indexOf(b)&&
(d=!1===e[1].call(a,b,c)?!1:!0)});return d}};c.keyboardNavigationModules=[b([[[37,39],function(a){if(!c.highlightAdjacentPoint(39===a))return this.move(39===a?1:-1)}],[[38,40],function(a){var b;if(c.highlightedPoint)if((b=c.series[c.highlightedPoint.series.index+(38===a?-1:1)])&&b.points[0])b.points[0].highlight();else return this.move(40===a?1:-1)}],[[13,32],function(){c.highlightedPoint&&c.highlightedPoint.firePointEvent("click")}]],{init:function(a){var b=c.series&&c.series[c.series.length-1],
b=b&&b.points&&b.points[b.points.length-1];0>a&&b&&b.highlight()}}),b([[[37,38],function(){for(var a=c.highlightedExportItem||0,b=!0,d=c.series;a--;)if(c.highlightExportItem(a)){b=!1;break}if(b)return c.hideExportMenu(),d&&d.length&&(a=d[d.length-1],a.points.length&&a.points[a.points.length-1].highlight()),this.move(-1)}],[[39,40],function(){for(var a=!0,b=(c.highlightedExportItem||0)+1;b<c.exportDivElements.length;++b)if(c.highlightExportItem(b)){a=!1;break}if(a)return c.hideExportMenu(),this.move(1)}],
[[13,32],function(){t(c.exportDivElements[c.highlightedExportItem])}]],{validate:function(){return c.exportChart&&!(c.options.exporting&&!1===c.options.exporting.enabled)},init:function(a){c.highlightedPoint=null;c.showExportMenu();if(0>a&&c.exportDivElements)for(a=c.exportDivElements.length;-1<a&&!c.highlightExportItem(a);--a);}}),b([[[38,40,37,39],function(a){c[38===a||40===a?"yAxis":"xAxis"][0].panStep(39>a?-1:1)}],[[9],function(a,b){var d;c.mapNavButtons[c.focusedMapNavButtonIx].setState(0);if(b.shiftKey&&
!c.focusedMapNavButtonIx||!b.shiftKey&&c.focusedMapNavButtonIx)return c.mapZoom(),this.move(b.shiftKey?-1:1);c.focusedMapNavButtonIx+=b.shiftKey?-1:1;d=c.mapNavButtons[c.focusedMapNavButtonIx];d.element.focus&&d.element.focus();d.setState(2)}],[[13,32],function(){t(c.mapNavButtons[c.focusedMapNavButtonIx].element)}]],{validate:function(){return c.mapZoom&&c.mapNavButtons&&2===c.mapNavButtons.length},transformTabs:!1,init:function(a){var b=c.mapNavButtons[0],d=c.mapNavButtons[1],b=0<a?b:d;h(c.mapNavButtons,
function(a,c){a.element.setAttribute("tabindex",-1);a.element.setAttribute("role","button");a.element.setAttribute("aria-label","Zoom "+(c?"out":"")+"chart")});b.element.focus&&b.element.focus();b.setState(2);c.focusedMapNavButtonIx=0<a?0:1}}),b([[[37,39,38,40],function(a){a=37===a||38===a?-1:1;if(!c.highlightRangeSelectorButton(c.highlightedRangeSelectorItemIx+a))return this.move(a)}],[[13,32],function(){3!==c.oldRangeSelectorItemState&&t(c.rangeSelector.buttons[c.highlightedRangeSelectorItemIx].element)}]],
{validate:function(){return c.rangeSelector&&c.rangeSelector.buttons&&c.rangeSelector.buttons.length},init:function(a){h(c.rangeSelector.buttons,function(a){a.element.setAttribute("tabindex","-1");a.element.setAttribute("role","button");a.element.setAttribute("aria-label","Select range "+(a.text&&a.text.textStr))});c.highlightRangeSelectorButton(0<a?0:c.rangeSelector.buttons.length-1)}}),b([[[9,38,40],function(a,b){var d=9===a&&b.shiftKey||38===a?-1:1,e=c.highlightedInputRangeIx+=d;if(1<e||0>e)return this.move(d);
c.rangeSelector[e?"maxInput":"minInput"].focus()}]],{validate:function(){return c.rangeSelector&&!1!==c.options.rangeSelector.inputEnabled&&c.rangeSelector.minInput&&c.rangeSelector.maxInput},transformTabs:!1,init:function(a){h(["minInput","maxInput"],function(a,b){c.rangeSelector[a]&&(c.rangeSelector[a].setAttribute("tabindex","-1"),c.rangeSelector[a].setAttribute("role","textbox"),c.rangeSelector[a].setAttribute("aria-label","Select "+(b?"end":"start")+" date."))});c.highlightedInputRangeIx=0<a?
0:1;c.rangeSelector[c.highlightedInputRangeIx?"maxInput":"minInput"].focus()}})];c.keyboardNavigationModuleIndex=0;c.renderTo.tabIndex||c.renderTo.setAttribute("tabindex","0");v(c.renderTo,"keydown",d);v(c,"destroy",function(){z(c.renderTo,"keydown",d)})};e.Chart.prototype.addScreenReaderRegion=function(a){var b=this,d=b.series,c=b.options,e=c.accessibility,g=b.screenReaderRegion=m.createElement("div"),h=m.createElement("h3"),q=m.createElement("a"),r=m.createElement("h3"),k={position:"absolute",left:"-9999px",
top:"auto",width:"1px",height:"1px",overflow:"hidden"},l=b.types||[],l=(1===l.length&&"pie"===l[0]||"map"===l[0])&&{}||b.getAxesDescription(),n=d[0]&&p[d[0].type]||p["default"];g.setAttribute("role","region");g.setAttribute("aria-label","Chart screen reader information.");g.innerHTML=e.screenReaderSectionFormatter&&e.screenReaderSectionFormatter(b)||'<div tabindex="0">Use regions/landmarks to skip ahead to chart'+(1<d.length?" and navigate between data series":"")+".</div><h3>Summary.</h3><div>"+
(c.title.text||"Chart")+(c.subtitle&&c.subtitle.text?". "+c.subtitle.text:"")+"</div><h3>Long description.</h3><div>"+(c.chart.description||"No description available.")+"</div><h3>Structure.</h3><div>Chart type: "+(c.chart.typeDescription||b.getTypeDescription())+"</div>"+(1===d.length?"<div>"+n[0]+" with "+d[0].points.length+" "+(1===d[0].points.length?n[1]:n[2])+".</div>":"")+(l.xAxis?"<div>"+l.xAxis+"</div>":"")+(l.yAxis?"<div>"+l.yAxis+"</div>":"");b.getCSV&&(q.innerHTML="View as data table.",
q.href="#"+a,q.setAttribute("tabindex","-1"),q.onclick=e.onTableAnchorClick||function(){b.viewData();m.getElementById(a).focus()},h.appendChild(q),g.appendChild(h));r.innerHTML="Chart graphic.";b.renderTo.insertBefore(r,b.renderTo.firstChild);b.renderTo.insertBefore(g,b.renderTo.firstChild);u(!0,r.style,k);u(!0,g.style,k)};e.Chart.prototype.callbacks.push(function(a){var b=a.options,d=b.accessibility;if(d.enabled){var c=m.createElementNS("http://www.w3.org/2000/svg","title"),f=m.createElementNS("http://www.w3.org/2000/svg",
"g"),g=a.container.getElementsByTagName("desc")[0],k=a.container.getElementsByTagName("text"),q="highcharts-title-"+a.index,r="highcharts-data-table-"+a.index,p=b.title.text||"Chart",l=b.exporting&&b.exporting.csv&&b.exporting.csv.columnHeaderFormatter,n=[];c.textContent=p;c.id=q;g.parentNode.insertBefore(c,g);a.renderTo.setAttribute("role","region");a.renderTo.setAttribute("aria-label",p+". Use up and down arrows to navigate.");if(a.exportSVGElements&&a.exportSVGElements[0]&&a.exportSVGElements[0].element){var t=
a.exportSVGElements[0].element.onclick,c=a.exportSVGElements[0].element.parentNode;a.exportSVGElements[0].element.onclick=function(){t.apply(this,Array.prototype.slice.call(arguments));a.addAccessibleContextMenuAttribs();a.highlightExportItem(0)};a.exportSVGElements[0].element.setAttribute("role","button");a.exportSVGElements[0].element.setAttribute("aria-label","View export menu");f.appendChild(a.exportSVGElements[0].element);f.setAttribute("role","region");f.setAttribute("aria-label","Chart export menu");
c.appendChild(f)}h(k,function(a){a.setAttribute("aria-hidden","true")});a.addScreenReaderRegion(r);d.keyboardNavigation&&a.addKeyboardNavEvents();u(!0,b.exporting,{csv:{columnHeaderFormatter:function(a,c,b){var d=n[n.length-1];1<b&&(d&&d.text)!==a.name&&n.push({text:a.name,span:b});return l?l.call(this,a,c,b):1<b?c:a.name}}});e.wrap(a,"getTable",function(a){return a.apply(this,Array.prototype.slice.call(arguments,1)).replace("<table>",'<table id="'+r+'" summary="Table representation of chart"><caption>'+
p+"</caption>")});e.wrap(a,"viewData",function(a){if(!this.insertedTable){a.apply(this,Array.prototype.slice.call(arguments,1));var c=m.getElementById(r),b=c.getElementsByTagName("tbody")[0],d=b.firstChild.children,e="<tr><td></td>",f,g;c.setAttribute("tabindex","-1");h(b.children,function(a){f=a.firstChild;g=m.createElement("th");g.setAttribute("scope","row");g.innerHTML=f.innerHTML;f.parentNode.replaceChild(g,f)});h(d,function(a){"TH"===a.tagName&&a.setAttribute("scope","col")});n.length&&(h(n,
function(a){e+='<th scope="col" colspan="'+a.span+'">'+a.text+"</th>"}),b.insertAdjacentHTML("afterbegin",e))}})}})})(k)});
|
PypiClean
|
/object_detection_tf-0.1.2-py3-none-any.whl/object_detection/utils/np_mask_ops.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
EPSILON = 1e-7
def area(masks):
"""Computes area of masks.
Args:
masks: Numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
    a numpy array with shape [N] representing mask areas.
Raises:
ValueError: If masks.dtype is not np.uint8
"""
if masks.dtype != np.uint8:
raise ValueError('Masks type should be np.uint8')
return np.sum(masks, axis=(1, 2), dtype=np.float32)
def intersection(masks1, masks2):
"""Compute pairwise intersection areas between masks.
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
    a numpy array with shape [N, M] representing pairwise intersection areas.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
n = masks1.shape[0]
m = masks2.shape[0]
answer = np.zeros([n, m], dtype=np.float32)
for i in np.arange(n):
for j in np.arange(m):
answer[i, j] = np.sum(np.minimum(masks1[i], masks2[j]), dtype=np.float32)
return answer
def iou(masks1, masks2):
"""Computes pairwise intersection-over-union between mask collections.
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
    masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
intersect = intersection(masks1, masks2)
area1 = area(masks1)
area2 = area(masks2)
union = np.expand_dims(area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / np.maximum(union, EPSILON)
def ioa(masks1, masks2):
"""Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two masks, mask1 and mask2 is defined as
their intersection area over mask2's area. Note that ioa is not symmetric,
that is, IOA(mask1, mask2) != IOA(mask2, mask1).
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
    masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
intersect = intersection(masks1, masks2)
areas = np.expand_dims(area(masks2), axis=0)
return intersect / (areas + EPSILON)
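if __name__ == '__main__':
  # Minimal usage sketch (added for illustration; not part of the original
  # module): two 4x4 binary masks whose 2x2 blocks overlap in a single pixel.
  mask_a = np.zeros((1, 4, 4), dtype=np.uint8)
  mask_a[0, :2, :2] = 1                  # 2x2 block, area 4
  mask_b = np.zeros((1, 4, 4), dtype=np.uint8)
  mask_b[0, 1:3, 1:3] = 1                # 2x2 block shifted by one pixel
  print(area(mask_a))                    # [4.]
  print(intersection(mask_a, mask_b))    # [[1.]]
  print(iou(mask_a, mask_b))             # approx. [[0.1428571]] = 1 / 7
  print(ioa(mask_a, mask_b))             # approx. [[0.25]] = intersection / area(mask_b)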
|
PypiClean
|
/ShowerModel-0.1.9.tar.gz/ShowerModel-0.1.9/showermodel/projection.py
|
import numpy as np
import pandas as pd
# Class #######################################################################
class Projection(pd.DataFrame):
"""
DataFrame containing the projection of a shower track.
    The track is viewed from the telescope position in both the horizontal
    coordinates system and the FoV coordinates system, and the fraction of
    the track within the telescope field of view is determined.
Parameters
----------
telescope : Telescope
Telescope object to be used.
track : Track or Shower
Track object to be used.
Attributes
----------
distance : float
Column 0, shower-to-telescope distance in km.
alt : float
Column 1, altitude in degrees (from horizon).
az : float
Column 2, azimuth in degrees (from north, clockwise).
theta : float
Column 3, offset angle in degrees relative to the telescope pointing
direction.
phi : float
Column 4, position angle in degrees from north in FoV projection.
beta : float
Column 5, angle in degrees relative to the apparent source position.
time : float
Column 6, arrival time in microseconds of photons emitted at each point
of the shower, where time=0 for photons produced at the top of the
atmosphere.
FoV : bool
Column 7, True if the shower point is within the telescope field of
view, False otherwise.
atmosphere : Atmosphere
Atmosphere object that is used.
track : Track
Track object that is used.
telescope : Telescope
Telescope object that is used.
distance_top : float
Distance in km to shower point at the top of the atmosphere.
beta_top : float
Beta angle in degrees of the shower point at the top of the
atmosphere.
distance_0 : float
Distance in km to the shower impact point at ground.
beta_0 : float
Beta angle in degrees of the shower impact point at ground.
distance_i : float
Distance in km to the first interaction point of the shower.
beta_i : float
Beta angle in degrees of the first interaction point of the shower.
distance_min : float
        Minimum distance in km from the telescope to the (infinite) line
        containing the shower axis.
alt_inf : float
Altitude in degrees of the apparent source position.
az_inf : float
Azimuth in degrees of the apparent source position.
theta_inf : float
Offset angle in degrees of the apparent source position.
phi_inf : float
Position angle in degrees of the apparent source position.
Methods
-------
show()
Show the projection of the shower track viewed by the telescope.
hor_to_FoV()
Convert cartesian coordinates from horizontal system to FoV system.
FoV_to_hor()
Convert cartesian coordinates from FoV system to horizontal system.
thetaphi_to_altaz()
Convert FoV coordinates theta/phi to horizontal coordinates alt/az.
altaz_to_thetaphi()
Convert horizontal coordinates alt/az to FoV coordinates theta/phi.
spherical()
Calculate the spherical coordinates in both horizontal and FoV systems.
"""
def __init__(self, telescope, track):
super().__init__(columns=['distance', 'alt', 'az', 'theta', 'phi',
'beta', 'time', 'FoV'])
_projection(self, telescope, track)
def show(self, axes=True, max_theta=30., X_mark=None):
"""
Show the projection of the shower track viewed by the telescope in both
horizontal and FoV coordinates systems.
Parameters
----------
axes : bool, default True
Show the axes of both frames of reference.
max_theta : float, default 30 degrees
Maximum offset angle in degrees relative to the telescope
pointing direction.
X_mark : float
Reference slant depth in g/cm^2 of the shower track to be
marked in the figure. If None, no mark is included.
Returns
-------
ax1, ax2 : PolarAxesSubplot
"""
from ._tools import show_projection
return show_projection(self, None, False, axes, max_theta, X_mark)
def altaz_to_thetaphi(self, alt, az):
"""
Convert polar horizontal coordinates alt, az to FoV coordinates
theta, phi.
Parameters
----------
alt, az : float or array_like
Returns
-------
theta, phi : float or array-like
See also
--------
Projection.hor_to_FoV : Convert cartesian coordinates from horizontal
system to FoV system.
Projection.thetaphi_to_altaz : Convert FoV coordinates theta, phi to
horizontal coordinates alt, az.
"""
return self.telescope.altaz_to_thetaphi(alt, az)
def hor_to_FoV(self, x_hor, y_hor, z_hor):
"""
Convert cartesian coordinates from horizontal system to FoV system.
In the FoV coordinates system, x_FoV grows in the right-hand direction,
y_FoV grows downward and z_FoV grows toward the pointing direction from
the telescope point of view.
Parameters
----------
x_hor, y_hor, z_hor : float or array-like
Returns
-------
        x_FoV, y_FoV, z_FoV : float or array_like
See also
--------
Projection.FoV_to_hor : Convert cartesian coordinates from FoV system
to horizontal system.
Projection.altaz_to_thetaphi : Convert horizontal coordinates alt, az
to FoV coordinates theta, phi.
"""
return self.telescope.hor_to_FoV(x_hor, y_hor, z_hor)
def thetaphi_to_altaz(self, theta, phi):
"""
Convert FoV coordinates theta, phi to horizontal coordinates alt, az.
Parameters
----------
theta, phi : float or array_like
Returns
-------
alt, az : float or array_like
See also
--------
Projection.FoV_to_hor : Convert cartesian coordinates from FoV system
to horizontal system.
Projection.altaz_to_thetaphi : Convert horizontal coordinates alt, az
to FoV coordinates theta, phi.
"""
return self.telescope.thetaphi_to_altaz(theta, phi)
def FoV_to_hor(self, x_FoV, y_FoV, z_FoV):
"""
Convert cartesian coordinates from FoV system to horizontal system.
In the FoV coordinates system, x_FoV grows in the right-hand direction,
y_FoV grows downward and z_FoV grows toward the pointing direction from
the telescope point of view.
Parameters
----------
x_FoV, y_FoV, z_FoV : float or array_like
Returns
-------
x_hor, y_hor, z_hor : float or array_like.
See also
--------
Projection.hor_to_FoV : Convert cartesian coordinates from horizontal
system to FoV system.
Projection.thetaphi_to_altaz : Convert FoV coordinates theta, phi to
horizontal coordinates alt, az.
"""
return self.telescope.FoV_to_hor(x_FoV, y_FoV, z_FoV)
def spherical(self, x, y, z):
"""
Calculate the spherical coordinates in both horizontal and FoV systems
from the 'absolute' x, y, z coordinates.
Parameters
----------
x, y, z : float or array_like
Returns
-------
distance, alt, az, theta, phi : float or array_like
"""
return self.telescope.spherical(x, y, z)
# Constructor #################################################################
def _projection(projection, telescope, track):
"""
Obtain the projection of a shower track viewed from the telescope position
    in both horizontal coordinates system (alt/az) and FoV coordinates system
(theta/phi) and determine the fraction of the track within the telescope
field of view.
Parameters
----------
projection : Projection
telescope : Telescope
track : Track or Shower
"""
from .telescope import Telescope
from .track import Track
from .shower import Shower
if isinstance(telescope, Telescope):
pass
# In case the input objects are not ordered correctly.
elif isinstance(telescope, Track):
telescope, track = (track, telescope)
elif isinstance(telescope, Shower):
telescope, shower = (track, telescope)
track = shower.track
else:
raise ValueError('The input telescope is not valid')
if isinstance(track, Track):
pass
elif isinstance(track, Shower):
shower = track
track = shower.track
else:
raise ValueError('The input track is not valid')
# projection = Projection(columns=['distance', 'alt', 'az', 'theta', 'phi',
# 'beta', 'time', 'FoV'])
projection.atmosphere = track.atmosphere
projection.track = track
projection.telescope = telescope
# Apparent position of the cosmic-ray source
projection.alt_inf = track.alt
projection.az_inf = track.az
theta_inf, phi_inf = telescope.altaz_to_thetaphi(track.alt, track.az)
projection.theta_inf = theta_inf
projection.phi_inf = phi_inf
# Shower spherical coordinates in both the horizontal and FoV systems
distance, alt, az, theta, phi = telescope.spherical(track.x, track.y,
track.z)
projection.distance = distance
projection.alt = alt
projection.az = az
projection.theta = theta
projection.phi = phi
# Coordinates of first interaction point of the shower relative to
# the telescope position
distance_i, alt_i, az_i, theta_i, phi_i = telescope.spherical(track.xi,
track.yi, track.zi)
projection.distance_i = distance_i
projection.alt_i = alt_i
projection.az_i = az_i
projection.theta_i = theta_i
projection.phi_i = phi_i
# Angle formed by the shower axis (backwards) and the vector going
# from the telescope position to the first interaction point
xi, yi, zi = telescope.abs_to_rel(track.xi, track.yi, track.zi)
proj_u_i = xi * track.ux + yi * track.uy + zi * track.uz
beta_i = telescope.zr_to_theta(proj_u_i, distance_i)
# Coordinates of the shower point at the top of the atmosphere relative to
# the telescope position
if track.z_top is None:
distance_top = None
beta_top = None
elif track.z_top==track.zi:
distance_top = distance_i
proj_u_top = proj_u_i
beta_top = beta_i
else:
distance_top = telescope.distance(track.x_top, track.y_top, track.z_top)
# Angle formed by the shower axis (backwards) and the vector going
# from the telescope position to the first interaction point
x_top, y_top, z_top = telescope.abs_to_rel(track.x_top, track.y_top,
track.z_top)
proj_u_top = x_top * track.ux + y_top * track.uy + z_top * track.uz
beta_top = telescope.zr_to_theta(proj_u_top, distance_top)
projection.distance_top = distance_top
# Coordinates of the shower impact point at ground level relative to
# the telescope position and minimum shower-to-telescope distance
if track.z0 is None:
distance_0 = None
beta_0 = None
projection.alt_0 = None
projection.az_0 = None
projection.theta_0 = None
projection.phi_0 = None
# Minimum shower-to-telescope distance
distance_min = distance_i * np.sin(np.radians(beta_i))
elif track.z0==track.zi:
distance_0 = distance_i
proj_u_0 = proj_u_i
beta_0 = beta_i
projection.alt_0 = alt_i
projection.az_0 = az_i
projection.theta_0 = theta_i
projection.phi_0 = phi_i
# Minimum shower-to-telescope distance
distance_min = distance_i * np.sin(np.radians(beta_i))
else:
distance_0, alt_0, az_0, theta_0, phi_0 = telescope.spherical(track.x0,
track.y0, track.z0)
projection.alt_0 = alt_0
projection.az_0 = az_0
projection.theta_0 = theta_0
projection.phi_0 = phi_0
# Angle formed by the shower axis (backwards) and the vector going
# from the telescope position to the shower impact point at ground
x0, y0, z0 = telescope.abs_to_rel(track.x0, track.y0, track.z0)
proj_u_0 = x0 * track.ux + y0 * track.uy + z0 * track.uz
beta_0 = telescope.zr_to_theta(proj_u_0, distance_0)
if distance_0<distance_i:
# Minimum shower-to-telescope distance
distance_min = distance_0 * np.sin(np.radians(beta_0))
else:
distance_min = distance_i * np.sin(np.radians(beta_i))
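# Geometric aside: distance * sin(beta) is the perpendicular distance from the
# telescope to the line containing the shower axis; when both the first-interaction
# point and the ground impact point are available, the closer one is used to
# evaluate it, presumably for numerical accuracy.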
projection.distance_0 = distance_0
# Half radius of the telescope mirror in km
half_R = np.sqrt(telescope.area / np.pi) / 2000.
# If the telescope is too close to the shower axis
x, y, z = telescope.abs_to_rel(track.x, track.y, track.z)
proj_u = x * track.ux + y * track.uy + z * track.uz
if distance_min < half_R:
# Force a minimum beta due to the finite dimensions of the telescope mirror
beta = telescope.xy_to_phi(proj_u, half_R)
beta_i = telescope.xy_to_phi(proj_u_i, half_R)
if track.z0 is not None:
beta_0 = telescope.xy_to_phi(proj_u_0, half_R)
if track.z_top is not None:
beta_top = telescope.xy_to_phi(proj_u_top, half_R)
else:
beta = telescope.zr_to_theta(proj_u, distance)
projection.beta = beta
projection.beta_i = beta_i
projection.beta_0 = beta_0
projection.beta_top = beta_top
# Travel time of photons reaching the telescope, with time=0 for photons
# emitted from the first interaction point. Equivalent to
# projection.time = track.t - (distance_i - distance) / 0.2998
# except for distance_min<half_R
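# (The constant 0.2998 is read here as the speed of light in km per microsecond,
# consistent with the distances handled in km elsewhere in this function, e.g.
# half_R above; this unit reading is an assumption.)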
projection.time = (track.t - distance_i / 0.2998
* (1. - np.sin(np.radians(projection.beta_i))
/ np.sin(np.radians(beta))))
# FoV = True for shower points within the telescope field of view
projection.FoV = ((projection.theta <= telescope.apert/2.)
& (projection.distance > 0.))
|
PypiClean
|
/tb-rest-client-3.5.tar.gz/tb-rest-client-3.5/tb_rest_client/models/models_pe/rule_node_id.py
|
# Copyright 2023. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pprint
import re # noqa: F401
import six
class RuleNodeId(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'entity_type': 'str'
}
attribute_map = {
'id': 'id',
'entity_type': 'entityType'
}
def __init__(self, id=None, entity_type=None): # noqa: E501
"""RuleNodeId - a model defined in Swagger""" # noqa: E501
self._id = None
self._entity_type = None
self.discriminator = None
self.id = id
self.entity_type = entity_type
@property
def id(self):
"""Gets the id of this RuleNodeId. # noqa: E501
ID of the entity, time-based UUID v1 # noqa: E501
:return: The id of this RuleNodeId. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this RuleNodeId.
ID of the entity, time-based UUID v1 # noqa: E501
:param id: The id of this RuleNodeId. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def entity_type(self):
"""Gets the entity_type of this RuleNodeId. # noqa: E501
string # noqa: E501
:return: The entity_type of this RuleNodeId. # noqa: E501
:rtype: str
"""
return self._entity_type
@entity_type.setter
def entity_type(self, entity_type):
"""Sets the entity_type of this RuleNodeId.
string # noqa: E501
:param entity_type: The entity_type of this RuleNodeId. # noqa: E501
:type: str
"""
if entity_type is None:
raise ValueError("Invalid value for `entity_type`, must not be `None`") # noqa: E501
allowed_values = ["RULE_NODE"] # noqa: E501
if entity_type not in allowed_values:
raise ValueError(
"Invalid value for `entity_type` ({0}), must be one of {1}" # noqa: E501
.format(entity_type, allowed_values)
)
self._entity_type = entity_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RuleNodeId, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RuleNodeId):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
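# Minimal usage sketch (illustrative; the UUID below is a made-up placeholder and
# 'RULE_NODE' is the only value accepted by the entity_type setter above):
#     node_id = RuleNodeId(id='784f394c-42b6-435a-983c-b7beff2784f9',
#                          entity_type='RULE_NODE')
#     print(node_id.to_dict())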
|
PypiClean
|
/gvm_tools-23.9.0.tar.gz/gvm_tools-23.9.0/scripts/create-targets-from-host-list.gmp.py
|
import sys
import time
from argparse import ArgumentParser, Namespace, RawTextHelpFormatter
from pathlib import Path
from typing import List
from gvm.protocols.gmp import Gmp
from gvmtools.helper import error_and_exit
HELP_TEXT = (
"This script pulls hostnames from a text "
"file and creates a target for each."
)
def check_args(args):
len_args = len(args.script) - 1
if len_args != 2:
message = """
This script pulls hostnames from a text file and creates a target \
for each.
Two parameters after the script name are required.
1. <hostname> -- IP of the GVM host
2. <hosts_textfile> -- text file containing hostnames
Example:
$ gvm-script --gmp-username name --gmp-password pass \
ssh --hostname <gsm> scripts/create_targets_from_host_list.gmp \
<hostname> <hosts_textfile>
"""
print(message)
sys.exit()
def parse_args(args: Namespace) -> Namespace: # pylint: disable=unused-argument
"""Parsing args ..."""
parser = ArgumentParser(
prefix_chars="+",
add_help=False,
formatter_class=RawTextHelpFormatter,
description=HELP_TEXT,
)
parser.add_argument(
"+h",
"++help",
action="help",
help="Show this help message and exit.",
)
parser.add_argument(
"hostname",
type=str,
help="Host name to create targets for.",
)
parser.add_argument(
"hosts_file",
type=str,
help=("File containing host names / IPs"),
)
ports = parser.add_mutually_exclusive_group()
ports.add_argument(
"+pl",
"++port-list-id",
type=str,
dest="port_list_id",
help="UUID of existing port list.",
)
ports.add_argument(
"+pr",
"++port-range",
dest="port_range",
type=str,
help=(
"Port range to create port list from, e.g. "
"T:1-1234 for ports 1-1234/TCP"
),
)
ports.set_defaults(
port_list_id="4a4717fe-57d2-11e1-9a26-406186ea4fc5"
) # All IANA assigned TCP and UDP
script_args, _ = parser.parse_known_args(args)
return script_args
def load_host_list(host_file):
try:
with open(host_file, encoding="utf-8") as f:
content = f.readlines()
host_list = [x.strip() for x in content]
host_list = list(filter(None, host_list))
except IOError as e:
error_and_exit(f"Failed to read host_file: {str(e)} (exit)")
if len(host_list) == 0:
error_and_exit("Host file is empty (exit)")
return host_list
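# Expected layout of the hosts file (hypothetical contents; one hostname or IP per
# line, surrounding whitespace is stripped and blank lines are dropped above):
#     host1.example.com
#     192.168.10.23
#     host2.example.com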
def send_targets(
gmp: Gmp,
host_name: str,
host_file: Path,
host_list: List[str],
port_list_id: str,
):
print(f"\nSending targets from {host_file} to {host_name}...")
for host in host_list:
name = f"Target for {host}"
comment = f"Created: {time.strftime('%Y/%m/%d-%H:%M:%S')}"
hosts = [host]
gmp.create_target(
name=name, comment=comment, hosts=hosts, port_list_id=port_list_id
)
def main(gmp: Gmp, args: Namespace) -> None:
# pylint: disable=undefined-variable
if args.script:
args = args.script[1:]
parsed_args = parse_args(args=args)
hosts_list = load_host_list(parsed_args.hosts_file)
if parsed_args.port_range:
print(parsed_args.port_range)
resp = gmp.create_port_list(
name=f"Port list for target {parsed_args.hostname}",
port_range=parsed_args.port_range,
comment="Port List created by gvm-script",
)
port_list_id = resp.xpath("//@id")[0]
print(f"Port list {port_list_id} created!\n")
else:
port_list_id = parsed_args.port_list_id
send_targets(
gmp,
parsed_args.hostname,
parsed_args.hosts_file,
hosts_list,
port_list_id,
)
print("\n Target(s) created!\n")
if __name__ == "__gmp__":
main(gmp, args)
|
PypiClean
|
/django-dmcadmin-0.1.1.tar.gz/django-dmcadmin-0.1.1/dmcadmin/static/AdminLTE/plugins/moment/locale/en-gb.js
|
;(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined'
&& typeof require === 'function' ? factory(require('../moment')) :
typeof define === 'function' && define.amd ? define(['../moment'], factory) :
factory(global.moment)
}(this, (function (moment) { 'use strict';
var enGb = moment.defineLocale('en-gb', {
months : 'January_February_March_April_May_June_July_August_September_October_November_December'.split('_'),
monthsShort : 'Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec'.split('_'),
weekdays : 'Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday'.split('_'),
weekdaysShort : 'Sun_Mon_Tue_Wed_Thu_Fri_Sat'.split('_'),
weekdaysMin : 'Su_Mo_Tu_We_Th_Fr_Sa'.split('_'),
longDateFormat : {
LT : 'HH:mm',
LTS : 'HH:mm:ss',
L : 'DD/MM/YYYY',
LL : 'D MMMM YYYY',
LLL : 'D MMMM YYYY HH:mm',
LLLL : 'dddd, D MMMM YYYY HH:mm'
},
calendar : {
sameDay : '[Today at] LT',
nextDay : '[Tomorrow at] LT',
nextWeek : 'dddd [at] LT',
lastDay : '[Yesterday at] LT',
lastWeek : '[Last] dddd [at] LT',
sameElse : 'L'
},
relativeTime : {
future : 'in %s',
past : '%s ago',
s : 'a few seconds',
ss : '%d seconds',
m : 'a minute',
mm : '%d minutes',
h : 'an hour',
hh : '%d hours',
d : 'a day',
dd : '%d days',
M : 'a month',
MM : '%d months',
y : 'a year',
yy : '%d years'
},
dayOfMonthOrdinalParse: /\d{1,2}(st|nd|rd|th)/,
ordinal : function (number) {
var b = number % 10,
output = (~~(number % 100 / 10) === 1) ? 'th' :
(b === 1) ? 'st' :
(b === 2) ? 'nd' :
(b === 3) ? 'rd' : 'th';
return number + output;
},
week : {
dow : 1, // Monday is the first day of the week.
doy : 4 // The week that contains Jan 4th is the first week of the year.
}
});
return enGb;
})));
|
PypiClean
|
/git-cola-4.3.2.tar.gz/git-cola-4.3.2/cola/models/dag.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from .. import core
from .. import utils
from ..models import prefs
# put summary at the end b/c it can contain
# any number of funky characters, including the separator
logfmt = r'format:%H%x01%P%x01%d%x01%an%x01%ad%x01%ae%x01%s'
logsep = chr(0x01)
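# For reference, one raw entry produced by the format above looks roughly like the
# following (the 0x01 separator is shown as "^A"; names, refs and dates are
# illustrative, and --decorate=full yields fully qualified refs):
#   <40-char sha1>^A<parent sha1s>^A (HEAD -> refs/heads/main, tag: refs/tags/v1.0)^AAlice^A<date>^Aalice@example.com^Acommit summary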
class CommitFactory(object):
root_generation = 0
commits = {}
@classmethod
def reset(cls):
cls.commits.clear()
cls.root_generation = 0
@classmethod
def new(cls, oid=None, log_entry=None):
if not oid and log_entry:
oid = log_entry[:40]
try:
commit = cls.commits[oid]
if log_entry and not commit.parsed:
commit.parse(log_entry)
cls.root_generation = max(commit.generation, cls.root_generation)
except KeyError:
commit = Commit(oid=oid, log_entry=log_entry)
if not log_entry:
cls.root_generation += 1
commit.generation = max(commit.generation, cls.root_generation)
cls.commits[oid] = commit
return commit
class DAG(object):
def __init__(self, ref, count):
self.ref = ref
self.count = count
self.overrides = {}
def set_ref(self, ref):
changed = ref != self.ref
if changed:
self.ref = ref
return changed
def set_count(self, count):
changed = count != self.count
if changed:
self.count = count
return changed
def set_arguments(self, args):
if args is None:
return
if self.set_count(args.count):
self.overrides['count'] = args.count
if hasattr(args, 'args') and args.args:
ref = core.list2cmdline(args.args)
if self.set_ref(ref):
self.overrides['ref'] = ref
def overridden(self, opt):
return opt in self.overrides
def paths(self):
all_refs = utils.shell_split(self.ref)
if '--' in all_refs:
all_refs = all_refs[all_refs.index('--') :]
return [p for p in all_refs if p and core.exists(p)]
class Commit(object):
root_generation = 0
__slots__ = (
'oid',
'summary',
'parents',
'children',
'branches',
'tags',
'author',
'authdate',
'email',
'generation',
'column',
'row',
'parsed',
)
def __init__(self, oid=None, log_entry=None):
self.oid = oid
self.summary = None
self.parents = []
self.children = []
self.tags = []
self.branches = []
self.email = None
self.author = None
self.authdate = None
self.parsed = False
self.generation = CommitFactory.root_generation
self.column = None
self.row = None
if log_entry:
self.parse(log_entry)
def parse(self, log_entry, sep=logsep):
self.oid = log_entry[:40]
after_oid = log_entry[41:]
details = after_oid.split(sep, 5)
(parents, tags, author, authdate, email, summary) = details
self.summary = summary if summary else ''
self.author = author if author else ''
self.authdate = authdate if authdate else ''
self.email = email if email else ''
if parents:
generation = None
for parent_oid in parents.split(' '):
parent = CommitFactory.new(oid=parent_oid)
parent.children.append(self)
if generation is None:
generation = parent.generation + 1
self.parents.append(parent)
generation = max(parent.generation + 1, generation)
self.generation = generation
if tags:
for tag in tags[2:-1].split(', '):
self.add_label(tag)
self.parsed = True
return self
def add_label(self, tag):
"""Add tag/branch labels from `git log --decorate ....`"""
if tag.startswith('tag: '):
tag = tag[5:] # strip off "tag: " leaving refs/tags/
if tag.startswith('refs/heads/'):
branch = tag[11:]
self.branches.append(branch)
if tag.startswith('refs/'):
# strip off refs/ leaving just tags/XXX remotes/XXX heads/XXX
tag = tag[5:]
if tag.endswith('/HEAD'):
return
# Git 2.4 Release Notes (draft)
# =============================
#
# Backward compatibility warning(s)
# ---------------------------------
#
# This release has a few changes in the user-visible output from
# Porcelain commands. These are not meant to be parsed by scripts, but
# the users still may want to be aware of the changes:
#
# * Output from "git log --decorate" (and "%d" format specifier used in
# the userformat "--format=<string>" parameter "git log" family of
# command takes) used to list "HEAD" just like other tips of branch
# names, separated with a comma in between. E.g.
#
# $ git log --decorate -1 main
# commit bdb0f6788fa5e3cacc4315e9ff318a27b2676ff4 (HEAD, main)
# ...
#
# This release updates the output slightly when HEAD refers to the tip
# of a branch whose name is also shown in the output. The above is
# shown as:
#
# $ git log --decorate -1 main
# commit bdb0f6788fa5e3cacc4315e9ff318a27b2676ff4 (HEAD -> main)
# ...
#
# C.f. http://thread.gmane.org/gmane.linux.kernel/1931234
head_arrow = 'HEAD -> '
if tag.startswith(head_arrow):
self.tags.append('HEAD')
self.add_label(tag[len(head_arrow) :])
else:
self.tags.append(tag)
def __str__(self):
return self.oid
def data(self):
return {
'oid': self.oid,
'summary': self.summary,
'author': self.author,
'authdate': self.authdate,
'parents': [p.oid for p in self.parents],
'tags': self.tags,
}
def __repr__(self):
return json.dumps(self.data(), sort_keys=True, indent=4, default=list)
def is_fork(self):
'''Returns True if the node is a fork'''
return len(self.children) > 1
def is_merge(self):
'''Returns True if the node is a merge'''
return len(self.parents) > 1
class RepoReader(object):
def __init__(self, context, params):
self.context = context
self.params = params
self.git = context.git
self.returncode = 0
self._proc = None
self._objects = {}
self._cmd = [
'git',
'-c',
'log.abbrevCommit=false',
'-c',
'log.showSignature=false',
'log',
'--topo-order',
'--reverse',
'--decorate=full',
'--pretty=' + logfmt,
]
self._cached = False
"""Indicates that all data has been read"""
self._topo_list = []
"""List of commits objects in topological order"""
cached = property(lambda self: self._cached)
"""Return True when no commits remain to be read"""
def __len__(self):
return len(self._topo_list)
def reset(self):
CommitFactory.reset()
if self._proc:
self._proc.kill()
self._proc = None
self._cached = False
self._topo_list = []
def get(self):
"""Generator function returns Commit objects found by the params"""
if self._cached:
idx = 0
while True:
try:
yield self._topo_list[idx]
except IndexError:
break
idx += 1
return
self.reset()
ref_args = utils.shell_split(self.params.ref)
cmd = (
self._cmd
+ ['-%d' % self.params.count]
+ ['--date=%s' % prefs.logdate(self.context)]
+ ref_args
)
self._proc = core.start_command(cmd)
while True:
log_entry = core.readline(self._proc.stdout).rstrip()
if not log_entry:
self._cached = True
self._proc.wait()
self.returncode = self._proc.returncode
self._proc = None
break
oid = log_entry[:40]
try:
yield self._objects[oid]
except KeyError:
commit = CommitFactory.new(log_entry=log_entry)
self._objects[commit.oid] = commit
self._topo_list.append(commit)
yield commit
return
def __getitem__(self, oid):
return self._objects[oid]
def items(self):
return list(self._objects.items())
|
PypiClean
|
/dawa_facade-2017.12.1b3.tar.gz/dawa_facade-2017.12.1b3/dawa_facade/util/dawa_session.py
|
import requests
import dawa_facade.util.exceptions
class DawaSession(requests.Session):
def __init__(self, base_url: str, timeout):
super().__init__()
self.base_url = base_url
self.timeout = timeout
def request(self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None,
allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None,
json=None, **kwargs):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
If the URL starts with / then the url is prefixed by the base url.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send
in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol or protocol and
hostname to the URL of the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
:rtype: requests.Response
"""
if url.startswith('/'):
url = self.base_url + url
timeout = self.timeout
if 'timeout' in kwargs:
timeout = kwargs['timeout']
return super().request(method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects,
proxies, hooks, stream, verify, cert, json)
def get(self, url, **kwargs) -> requests.Response:
"""Sends a GET request. Returns :class:`requests.Response` object.
:param url: URL for the new :class:`requests.Request` object.
:param kwargs: Optional arguments that ``request`` takes.
"""
if 'stream' not in kwargs:
kwargs['stream'] = True
response = super().get(url, **kwargs)
# Check that the content type is as expected
content_type = response.headers.get('Content-Type', '** NOT IN RESPONSE HEADERS **') # type: str
if not content_type.startswith('application/json'):
raise dawa_facade.util.exceptions.UnknownContentType(
status_code=response.status_code,
details={'expected': 'application/json', 'got': content_type}
)
# Check if we got an exception
if response.status_code != 200:
data = response.json()
if response.status_code == 404 and isinstance(data, list) and len(data) == 0:
# Not found but no error code
return response
if 'type' in data:
# The type of the exception was provided by DAWA, check if we have implemented it
exception_class = getattr(
dawa_facade.util.exceptions,
data['type'],
dawa_facade.util.exceptions.UnknownException
)
# No matter what, it is for sure a subclass of DawaException we have now
assert issubclass(exception_class, dawa_facade.util.exceptions.DawaException)
if 'details' in data:
# DAWA also provided details about the exception, include these
raise exception_class(status_code=response.status_code, details=data['details'])
# No details
raise exception_class(status_code=response.status_code)
# Unknown type of exception
raise dawa_facade.util.exceptions.UnknownException(status_code=response.status_code, details=data)
# Yay, we got status code 200, everything must be fine
return response
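# Illustrative sketch of typical use (the base URL, endpoint and query parameter
# below are examples; timeout is in seconds, as in `requests`):
#     session = DawaSession(base_url='https://dawa.aws.dk', timeout=30)
#     response = session.get('/adresser', params={'per_side': 10})
#     addresses = response.json()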
|
PypiClean
|
/python-airtable-0.5.tar.gz/python-airtable-0.5/README.md
|
# Python Airtable
[](https://pypi.python.org/pypi/python-airtable/)
[](https://pypi.python.org/pypi/python-airtable/)
[](https://pypi.python.org/pypi/python-airtable/)
[](https://pypi.python.org/pypi/python-airtable/)
[](https://pypi.python.org/pypi/python-airtable/)
[](http://airtable-python-wrapper.readthedocs.io/en/latest/?badge=latest)
This is a Python module for accessing Airtable largely based on the original [airtable-python-wrapper](https://github.com/gtalarico/airtable-python-wrapper/) by [Gui Talarico](https://github.com/gtalarico) with some modifications.
## Installing
```
pip install python-airtable
```
## Documentation
The original full documentation is available [here](http://airtable-python-wrapper.readthedocs.io/).
### Usage Example
```python
from airtable import Airtable
# We updated the signature of `Airtable` class to support `airtable://` scheme URLs along with `view` and `sort` supported within the URLs.
airtable = Airtable('airtable://app1234567890/table_name?view=My%20View&sort=ID')
for record_id, fields in airtable.iter_records():
print(f'Record ID: {record_id}, Fields: {fields}')
# Now you can get all the Airtable records as a big dictionary with record ID as keys
airtable.get_all_as_dict()
airtable.insert({'Name': 'Brian'})
# We added `batch_insert` and support generators for the records arguments; chunking of records to 10 each is done automatically.
airtable.batch_insert([record1, record2, ...])
airtable.batch_update([(id1, record1), (id2, record2), ...])  # same for batch_update
airtable.search('Name', 'Tom')
airtable.update_by_field('Name', 'Tom', {'Phone': '1234-4445'})
airtable.delete_by_field('Name', 'Tom')
```
|
PypiClean
|
/ztfy.myams-0.1.33.tar.gz/ztfy.myams-0.1.33/src/ztfy/myams/resources/js/ext/bootstrap-modal.min.js
|
!function(t){"use strict";var i=function(t,i){this.init(t,i)};i.prototype={constructor:i,init:function(i,e){var n=this;this.options=e,this.$element=t(i).delegate('[data-dismiss="modal"]',"click.dismiss.modal",t.proxy(this.hide,this)),this.options.remote&&this.$element.find(".modal-body").load(this.options.remote,function(){var i=t.Event("loaded");n.$element.trigger(i)});var s="function"==typeof this.options.manager?this.options.manager.call(this):this.options.manager;(s=s.appendModal?s:t(s).modalmanager().data("modalmanager")).appendModal(this)},toggle:function(){return this[this.isShown?"hide":"show"]()},show:function(){var i=t.Event("show");this.isShown||(this.$element.trigger(i),i.isDefaultPrevented()||(this.escape(),this.tab(),this.options.loading&&this.loading()))},hide:function(i){if(i&&i.preventDefault(),i=t.Event("hide"),this.$element.trigger(i),!this.isShown||i.isDefaultPrevented())return this.isShown=!1;this.isShown=!1,this.escape(),this.tab(),this.isLoading&&this.loading(),t(document).off("focusin.modal"),this.$element.removeClass("in").removeClass("animated").removeClass(this.options.attentionAnimation).removeClass("modal-overflow").attr("aria-hidden",!0),t.support.transition&&this.$element.hasClass("fade")?this.hideWithTransition():this.hideModal()},layout:function(){var i=this.options.height?"height":"max-height",e=this.options.height||this.options.maxHeight;if(this.options.width){this.$element.css("width",this.options.width);var n=this;this.$element.css("margin-left",function(){return/%/gi.test(n.options.width)?-parseInt(n.options.width)/2+"%":-t(this).width()/2+"px"})}else this.$element.css("width",""),this.$element.css("margin-left","");if(this.$element.find(this.options.overflow).css("overflow","").css(i,""),e){var s=this.$element.find(this.options.overflow);void 0===s.css("overflow")&&s.style("overflow","auto","important"),s.style(i,("function"==typeof e?e():e)+"px","important")}t(window).height(),this.$element.height();this.$element.css("margin-top",0).addClass("modal-overflow")},tab:function(){var i=this;this.isShown&&this.options.consumeTab?this.$element.on("keydown.tabindex.modal","[data-tabindex]",function(e){if(e.keyCode&&9==e.keyCode){var n=[],s=Number(t(this).data("tabindex"));i.$element.find("[data-tabindex]:enabled:visible:not([readonly])").each(function(i){n.push(Number(t(this).data("tabindex")))}),n.sort(function(t,i){return t-i});var o=t.inArray(s,n);e.shiftKey?0==o?i.$element.find("[data-tabindex="+n[n.length-1]+"]").focus():i.$element.find("[data-tabindex="+n[o-1]+"]").focus():o<n.length-1?i.$element.find("[data-tabindex="+n[o+1]+"]").focus():i.$element.find("[data-tabindex="+n[0]+"]").focus(),e.preventDefault()}}):this.isShown||this.$element.off("keydown.tabindex.modal")},escape:function(){var t=this;this.isShown&&this.options.keyboard?(this.$element.attr("tabindex")||this.$element.attr("tabindex",-1),this.$element.on("keyup.dismiss.modal",function(i){27==i.which&&t.hide()})):this.isShown||this.$element.off("keyup.dismiss.modal")},hideWithTransition:function(){var i=this,e=setTimeout(function(){i.$element.off(t.support.transition.end),i.hideModal()},500);this.$element.one(t.support.transition.end,function(){clearTimeout(e),i.hideModal()})},hideModal:function(){var 
t=this.options.height?"height":"max-height";(this.options.height||this.options.maxHeight)&&this.$element.find(this.options.overflow).css("overflow","").css(t,""),this.$element.hide().trigger("hidden")},removeLoading:function(){this.$loading.remove(),this.$loading=null,this.isLoading=!1},loading:function(i){i=i||function(){};var e=this.$element.hasClass("fade")?"fade":"";if(this.isLoading)if(this.isLoading&&this.$loading){this.$loading.removeClass("in");var n=this;t.support.transition&&this.$element.hasClass("fade")?this.$loading.one(t.support.transition.end,function(){n.removeLoading()}):n.removeLoading()}else i&&i(this.isLoading);else{var s=t.support.transition&&e;this.$loading=t('<div class="loading-mask '+e+'">').append(this.options.spinner).appendTo(this.$element),s&&this.$loading[0].offsetWidth,this.$loading.addClass("in"),this.isLoading=!0,s?this.$loading.one(t.support.transition.end,i):i()}},focus:function(){var t=this.$element.find(this.options.focusOn);(t=t.length?t:this.$element).focus()},attention:function(){if(this.options.attentionAnimation){this.$element.removeClass("animated").removeClass(this.options.attentionAnimation);var t=this;setTimeout(function(){t.$element.addClass("animated").addClass(t.options.attentionAnimation)},0)}this.focus()},destroy:function(){var i=t.Event("destroy");this.$element.trigger(i),i.isDefaultPrevented()||(this.$element.off(".modal").removeData("modal").removeClass("in").attr("aria-hidden",!0),this.$parent!==this.$element.parent()?this.$element.appendTo(this.$parent):this.$parent.length||(this.$element.remove(),this.$element=null),this.$element.trigger("destroyed"))}},t.fn.modal=function(e,n){return this.each(function(){var s=t(this),o=s.data("modal"),a=t.extend({},t.fn.modal.defaults,s.data(),"object"==typeof e&&e);o||s.data("modal",o=new i(this,a)),"string"==typeof e?o[e].apply(o,[].concat(n)):a.show&&o.show()})},t.fn.modal.defaults={keyboard:!0,backdrop:!0,loading:!1,show:!0,width:null,height:null,maxHeight:null,modalOverflow:!1,consumeTab:!0,focusOn:null,replace:!1,resize:!1,overflow:".modal-body",attentionAnimation:"shake",manager:"body",spinner:'<div class="loading-spinner" style="width: 200px; margin-left: -100px;"><div class="progress progress-striped active"><div class="bar" style="width: 100%;"></div></div></div>',backdropTemplate:'<div class="modal-backdrop" />'},t.fn.modal.Constructor=i,t(function(){t(document).off("click.modal").on("click.modal.data-api",'[data-toggle="modal"]',function(i){var e=t(this),n=e.attr("href"),s=t(e.attr("data-target")||n&&n.replace(/.*(?=#[^\s]+$)/,"")),o=s.data("modal")?"toggle":t.extend({remote:!/#/.test(n)&&n},s.data(),e.data());i.preventDefault(),s.modal(o).one("hide",function(){e.focus()})})})}(window.jQuery);
|
PypiClean
|
/throat-0.0.0-py3-none-any.whl/app/views/user.py
|
import time
from peewee import fn, JOIN
from flask import Blueprint, render_template, abort, redirect, url_for, flash
from flask_login import login_required, current_user
from flask_babel import _, Locale
from .do import uid_from_recovery_token, info_from_email_confirmation_token
from .. import misc, config
from ..auth import auth_provider, email_validation_is_required
from ..misc import engine, send_email
from ..forms import EditUserForm, CreateUserMessageForm, EditAccountForm, DeleteAccountForm, PasswordRecoveryForm
from ..forms import PasswordResetForm
from ..models import User, UserStatus, UserMetadata
from ..models import Sub, SubMod, SubPost, SubPostComment, UserSaved, InviteCode
bp = Blueprint('user', __name__)
@bp.route("/u/<user>")
def view(user):
""" WIP: View user's profile, posts, comments, badges, etc """
try:
user = User.get(fn.lower(User.name) == user.lower())
except User.DoesNotExist:
abort(404)
if user.status == 10:
abort(404)
modsquery = SubMod.select(Sub.name, SubMod.power_level).join(Sub).where(
(SubMod.uid == user.uid) & (SubMod.invite == False))
owns = [x.sub.name for x in modsquery if x.power_level == 0]
mods = [x.sub.name for x in modsquery if 1 <= x.power_level <= 2]
invitecodeinfo = misc.getInviteCodeInfo(user.uid)
badges = misc.getUserBadges(user.uid)
pcount = SubPost.select().where(SubPost.uid == user.uid).count()
ccount = SubPostComment.select().where(SubPostComment.uid == user.uid).count()
habit = Sub.select(Sub.name, fn.Count(SubPost.pid).alias('count')).join(SubPost, JOIN.LEFT_OUTER,
on=(SubPost.sid == Sub.sid))
habit = habit.where(SubPost.uid == user.uid).group_by(Sub.sid).order_by(fn.Count(SubPost.pid).desc()).limit(10)
level, xp = misc.get_user_level(user.uid)
if xp > 0:
currlv = (level ** 2) * 10
nextlv = ((level + 1) ** 2) * 10
required_xp = nextlv - currlv
progress = ((xp - currlv) / required_xp) * 100
else:
progress = 0
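# Worked example of the progression maths above: at level 3, currlv = 90 and
# nextlv = 160, so required_xp = 70; a user with xp = 125 would show
# progress = (125 - 90) / 70 * 100 = 50%.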
givenScore = misc.getUserGivenScore(user.uid)
return engine.get_template('user/profile.html').render(
{'user': user, 'level': level, 'progress': progress, 'postCount': pcount, 'commentCount': ccount,
'givenScore': givenScore, 'invitecodeinfo': invitecodeinfo, 'badges': badges, 'owns': owns, 'mods': mods, 'habits': habit,
'msgform': CreateUserMessageForm()})
@bp.route("/u/<user>/posts", defaults={'page': 1})
@bp.route("/u/<user>/posts/<int:page>")
def view_user_posts(user, page):
""" WIP: View user's recent posts """
try:
user = User.get(fn.Lower(User.name) == user.lower())
except User.DoesNotExist:
abort(404)
if user.status == 10:
abort(404)
if current_user.is_admin():
posts = misc.getPostList(misc.postListQueryBase(adminDetail=True).where(User.uid == user.uid),
'new', page).dicts()
else:
posts = misc.getPostList(misc.postListQueryBase(noAllFilter=True).where(User.uid == user.uid),
'new', page).dicts()
return render_template('userposts.html', page=page, sort_type='user.view_user_posts',
posts=posts, user=user)
@bp.route("/u/<user>/savedposts", defaults={'page': 1})
@bp.route("/u/<user>/savedposts/<int:page>")
@login_required
def view_user_savedposts(user, page):
""" WIP: View user's saved posts """
if current_user.name.lower() == user.lower():
posts = misc.getPostList(
misc.postListQueryBase(noAllFilter=True).join(UserSaved, on=(UserSaved.pid == SubPost.pid)).where(
UserSaved.uid == current_user.uid),
'new', page).dicts()
return render_template('userposts.html', page=page,
sort_type='user.view_user_savedposts',
posts=posts, user=current_user)
else:
abort(403)
@bp.route("/u/<user>/comments", defaults={'page': 1})
@bp.route("/u/<user>/comments/<int:page>")
def view_user_comments(user, page):
""" WIP: View user's recent comments """
try:
user = User.get(fn.Lower(User.name) == user.lower())
except User.DoesNotExist:
abort(404)
if user.status == 10:
abort(404)
comments = misc.getUserComments(user.uid, page)
return render_template('usercomments.html', user=user, page=page, comments=comments)
@bp.route("/settings/invite")
@login_required
def invite_codes():
if not misc.enableInviteCode():
return redirect('/settings')
codes = InviteCode.select().where(InviteCode.user == current_user.uid)
maxcodes = int(misc.getMaxCodes(current_user.uid))
created = codes.count()
avail = 0
if (maxcodes - created) >= 0:
avail = maxcodes - created
return engine.get_template('user/settings/invitecode.html').render(
{'codes': codes, 'created': created, 'max': maxcodes, 'avail': avail,
'user': User.get(User.uid == current_user.uid)})
@bp.route('/settings/subs')
@login_required
def edit_subs():
return engine.get_template('user/topbar.html').render({})
@bp.route("/settings")
@login_required
def edit_user():
styles = 'nostyles' in current_user.prefs
nsfw = 'nsfw' in current_user.prefs
exp = 'labrat' in current_user.prefs
noscroll = 'noscroll' in current_user.prefs
nochat = 'nochat' in current_user.prefs
form = EditUserForm(show_nsfw=nsfw,
disable_sub_style=styles, experimental=exp,
noscroll=noscroll, nochat=nochat, subtheme=current_user.subtheme,
language=current_user.language)
languages = config.app.languages
form.language.choices = [('', _('Auto detect'))]
for i in languages:
form.language.choices.append((i, Locale(*i.split("_")).display_name.capitalize()))
return engine.get_template('user/settings/preferences.html').render({'edituserform': form, 'user': User.get(User.uid == current_user.uid)})
@bp.route("/settings/account")
@login_required
def edit_account():
return engine.get_template('user/settings/account.html').render(
{'form': EditAccountForm(),
'user': User.get(User.uid == current_user.uid)})
@bp.route('/settings/account/confirm-email/<token>')
def confirm_email_change(token):
info = info_from_email_confirmation_token(token)
user = None
try:
user = User.get(User.uid == info['uid'])
except (TypeError, User.DoesNotExist):
flash(_('The link you used is invalid or has expired'), 'error')
return redirect(url_for('user.edit_account'))
if user.status == UserStatus.OK:
auth_provider.confirm_pending_email(user, info['email'])
flash(_('Your password recovery email address is now confirmed!'), 'message')
return redirect(url_for('user.edit_account'))
return redirect(url_for('home.index'))
@bp.route("/settings/delete")
@login_required
def delete_account():
return engine.get_template('user/settings/delete.html').render({'form': DeleteAccountForm(), 'user': User.get(User.uid == current_user.uid)})
@bp.route("/recover")
def password_recovery():
""" Endpoint for the password recovery form """
if current_user.is_authenticated:
return redirect(url_for('home.index'))
form = PasswordRecoveryForm()
form.cap_key, form.cap_b64 = misc.create_captcha()
return engine.get_template('user/password_recovery.html').render({'lpform': form})
@bp.route('/reset/<token>')
def password_reset(token):
""" The page that actually resets the password """
user = None
try:
user = User.get(User.uid == uid_from_recovery_token(token))
except User.DoesNotExist:
pass
if user is None or user.status != UserStatus.OK:
flash(_('Password reset link was invalid or expired'), 'error')
return redirect(url_for('user.password_recovery'))
if current_user.is_authenticated:
return redirect(url_for('home.index'))
form = PasswordResetForm(key=token, user=user.uid)
return engine.get_template('user/password_reset.html').render({'lpform': form})
|
PypiClean
|
/NinjaTools-0.68-py3-none-any.whl/ninja_tools/keyboard.py
|
import ctypes
from random import uniform
from time import sleep
import win32gui
import ninja_tools.keycodes as key
INPUT_MOUSE = 0
INPUT_KEYBOARD = 1
INPUT_HARDWARE = 2
KEYEVENTF_EXTENDEDKEY = 0x0001
KEYEVENTF_KEYUP = 0x0002
KEYEVENTF_SCANCODE = 0x0008
KEYEVENTF_UNICODE = 0x0004
class MOUSE_INPUT(ctypes.Structure):
_fields_ = (('dx', ctypes.c_long),
('dy', ctypes.c_long),
('mouseData', ctypes.c_ulong),
('dwFlags', ctypes.c_ulong),
('time', ctypes.c_ulong),
('dwExtraInfo', ctypes.POINTER(ctypes.c_ulong)))
class KEYBOARD_INPUT(ctypes.Structure):
_fields_ = (('wVk', ctypes.c_ushort),
('wScan', ctypes.c_ushort),
('dwFlags', ctypes.c_ulong),
('time', ctypes.c_ulong),
('dwExtraInfo', ctypes.POINTER(ctypes.c_ulong)))
class HARDWARE_INPUT(ctypes.Structure):
_fields_ = (('uMsg', ctypes.c_ulong),
('wParamL', ctypes.c_ushort),
('wParamH', ctypes.c_ushort))
class _INPUT_union(ctypes.Union):
_fields_ = (('mi', MOUSE_INPUT),
('ki', KEYBOARD_INPUT),
('hi', HARDWARE_INPUT))
class INPUT(ctypes.Structure):
_fields_ = (('type', ctypes.c_ulong),
('union', _INPUT_union))
def send_input(*inputs):
n_inputs = len(inputs)
lp_input = INPUT * n_inputs
p_inputs = lp_input(*inputs)
cb_size = ctypes.c_int(ctypes.sizeof(INPUT))
return ctypes.windll.user32.SendInput(n_inputs, p_inputs, cb_size)
def input_structure(structure):
if isinstance(structure, MOUSE_INPUT):
return INPUT(INPUT_MOUSE, _INPUT_union(mi=structure))
if isinstance(structure, KEYBOARD_INPUT):
return INPUT(INPUT_KEYBOARD, _INPUT_union(ki=structure))
if isinstance(structure, HARDWARE_INPUT):
return INPUT(INPUT_HARDWARE, _INPUT_union(hi=structure))
raise TypeError('Cannot create INPUT structure!')
def keyboard_input_unicode(code, flags=0):
flags = KEYEVENTF_UNICODE | flags
return KEYBOARD_INPUT(0, code, flags, 0, None)
def keyboard_input_vk(code, flags=0):
return KEYBOARD_INPUT(code, code, flags, 0, None)
def keyboard_event_unicode(code, _):
return input_structure(keyboard_input_unicode(code))
def keyboard_event_vk(code, flags=0):
return input_structure(keyboard_input_vk(code, flags))
def is_pressed(code):
return ctypes.windll.user32.GetKeyState(code) & 0x8000
def delay(min_delay=50, max_delay=100):
sleep(uniform(min_delay / 1000, max_delay / 1000))
def key_down(code):
send_input(keyboard_event_vk(code, flags=0))
def key_up(code):
send_input(keyboard_event_vk(code, KEYEVENTF_KEYUP))
def press(code, pause=None):
key_down(code)
if pause:
sleep(pause / 1000)
else:
delay()
key_up(code)
if pause:
sleep(pause / 1000)
else:
delay()
def two_keys_combo(key1, key2):
send_input(keyboard_event_vk(key1), keyboard_event_vk(key2))
delay()
send_input(keyboard_event_vk(key2, KEYEVENTF_KEYUP),
keyboard_event_vk(key1, KEYEVENTF_KEYUP))
delay()
def _press(character):
unicode_to_vk = {
'\r': 0x0D,
'\n': 0x0D,
}
if character in unicode_to_vk:
return press(unicode_to_vk[character])
code = ord(character)
send_input(keyboard_event_unicode(code, 0))
delay()
send_input(keyboard_event_unicode(code, KEYEVENTF_KEYUP))
delay()
def type_stream(string):
for char in string:
_press(char)
class WindowPress:
def __init__(self, handle=None, window_name=None, focus=True, foreground=True, attach=True):
self.user32 = ctypes.windll.user32
self.kernel32 = ctypes.windll.kernel32
self.focus = focus
self.foreground = foreground
self.attach = attach
if handle is None and window_name is not None:
self.handle = win32gui.FindWindow(None, window_name)
else:
self.handle = handle
self.key = key
def press(self, key_code):
user32 = ctypes.windll.user32
kernel32 = ctypes.windll.kernel32
# Attach the current thread to the thread of the window you want to send input to
window_foreground = user32.GetForegroundWindow()
thread_current = kernel32.GetCurrentThreadId()
thread_window = user32.GetWindowThreadProcessId(self.handle, None)
if self.attach and thread_current != thread_window:
user32.AttachThreadInput(thread_window, thread_current, True)
if self.focus:
user32.SetFocus(self.handle)
if self.foreground:
user32.SetForegroundWindow(self.handle)
# Press key
press(key_code)
# Detach the current thread from the window
if self.attach and thread_current != thread_window:
user32.AttachThreadInput(thread_window, thread_current, False)
if self.focus:
user32.SetFocus(window_foreground)
if self.foreground:
user32.SetForegroundWindow(window_foreground)
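# Illustrative usage sketch (the virtual-key codes are standard Windows values and
# the window title is a hypothetical example):
#     press(0x41)                     # tap the 'A' key
#     type_stream("hello world\n")    # type a unicode string
#     WindowPress(window_name="Untitled - Notepad").press(0x0D)  # send Enter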
|
PypiClean
|
/wildbook_ia-4.0.4-py3-none-any.whl/wbia/control/manual_chip_funcs.py
|
import logging
from os.path import join
import utool as ut
from wbia import constants as const
from wbia.control import accessor_decors, controller_inject
from wbia.control.controller_inject import make_ibs_register_decorator
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
CLASS_INJECT_KEY, register_ibs_method = make_ibs_register_decorator(__name__)
register_api = controller_inject.get_wbia_flask_api(__name__)
ANNOT_ROWID = 'annot_rowid'
CHIP_ROWID = 'chip_rowid'
FEAT_VECS = 'feature_vecs'
FEAT_KPTS = 'feature_keypoints'
FEAT_NUM_FEAT = 'feature_num_feats'
CONFIG_ROWID = 'config_rowid'
# ---------------------
# ROOT LEAF FUNCTIONS
# ---------------------
NEW_DEPC = True
@register_ibs_method
@accessor_decors.getter_1to1
# register_api('/api/chip/fpath/', methods=['GET'])
def get_annot_chip_fpath(
ibs,
aid_list,
ensure=True,
config2_=None,
check_external_storage=False,
num_retries=1,
):
r"""
Returns the cached chip uri based off of the current
configuration.
Returns:
chip_fpath_list (list): cfpaths defined by ANNOTATIONs
RESTful:
Method: GET
URL: /api/chip/fpath/
"""
return ibs.depc_annot.get(
'chips', aid_list, 'img', config=config2_, ensure=ensure, read_extern=False
)
@register_ibs_method
@accessor_decors.getter_1to1
# @register_api('/api/chip/', methods=['GET'])
def get_annot_chips(ibs, aid_list, config2_=None, ensure=True, verbose=False, eager=True):
r"""
Args:
ibs (IBEISController): wbia controller object
aid_list (int): list of annotation ids
ensure (bool): eager evaluation if True
config2_ (QueryRequest): query request object with hyper-parameters
Returns:
list: chip_list
CommandLine:
python -m wbia.control.manual_chip_funcs get_annot_chips
RESTful:
Method: GET
URL: /api/chip/
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.control.manual_chip_funcs import * # NOQA
>>> import wbia
>>> ibs = wbia.opendb('testdb1')
>>> aid_list = ibs.get_valid_aids()[0:5]
>>> config2_ = {'dim_size': 450, 'resize_dim': 'area'}
>>> chip_list = get_annot_chips(ibs, aid_list, config2_)
>>> chip_sum_list = [chip.sum() for chip in chip_list]
>>> target = [96053684, 65140000, 67223205, 109367378, 73995663]
>>> ut.assert_almost_eq(chip_sum_list, target, 15000)
>>> print(chip_sum_list)
"""
return ibs.depc_annot.get('chips', aid_list, 'img', config=config2_, ensure=ensure)
@register_ibs_method
@accessor_decors.getter_1to1
# @cache_getter(const.ANNOTATION_TABLE, 'chipsizes')
# @register_api('/api/chip/sizes/', methods=['GET'])
def get_annot_chip_sizes(ibs, aid_list, ensure=True, config2_=None):
r"""
Args:
ibs (IBEISController): wbia controller object
aid_list (int): list of annotation ids
ensure (bool): eager evaluation if True
Returns:
list: chipsz_list - the (width, height) of computed annotation chips.
CommandLine:
python -m wbia.control.manual_chip_funcs get_annot_chip_sizes
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.control.manual_chip_funcs import * # NOQA
>>> import wbia
>>> # build test data
>>> ibs = wbia.opendb('testdb1')
>>> aid_list = ibs.get_valid_aids()[0:3]
>>> ensure = True
>>> config2_ = {'dim_size': 450, 'resize_dim': 'area'}
>>> # execute function
>>> chipsz_list = get_annot_chip_sizes(ibs, aid_list, ensure, config2_=config2_)
>>> # verify results
>>> result = str(chipsz_list)
>>> print(result)
[(545, 372), (603, 336), (520, 390)]
"""
return ibs.depc_annot.get(
'chips', aid_list, ('width', 'height'), config=config2_, ensure=ensure
)
@register_ibs_method
def get_annot_chip_dlensqrd(ibs, aid_list, config2_=None):
r"""
Args:
ibs (IBEISController): wbia controller object
aid_list (list):
Returns:
list: topx2_dlen_sqrd
CommandLine:
python -m wbia.control.manual_chip_funcs get_annot_chip_dlensqrd
RESTful:
Method: GET
URL: /api/chip/dlensqrd/
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.control.manual_chip_funcs import * # NOQA
>>> import wbia
>>> ibs = wbia.opendb('testdb1')
>>> aid_list = ibs.get_valid_aids()
>>> config2_ = {'dim_size': 450, 'resize_dim': 'area'}
>>> topx2_dlen_sqrd = ibs.get_annot_chip_dlensqrd(aid_list, config2_=config2_)
>>> result = str(topx2_dlen_sqrd)
>>> print(result)
[435409, 476505, 422500, 422500, 422500, 437924, 405000, 405000, 447805, 420953, 405008, 406265, 512674]
"""
topx2_dlen_sqrd = [
((w ** 2) + (h ** 2))
for (w, h) in ibs.get_annot_chip_sizes(aid_list, config2_=config2_)
]
return topx2_dlen_sqrd
@register_ibs_method
@accessor_decors.getter_1to1
# @register_api('/api/chip/thumbpath/', methods=['GET'])
def get_annot_chip_thumbpath(ibs, aid_list, thumbsize=None, config2_=None):
r"""
just constructs the path. does not compute it. that is done by
api_thumb_delegate
RESTful:
Method: GET
URL: /api/chip/thumbpath/
"""
if thumbsize is None:
thumbsize = ibs.cfg.other_cfg.thumb_size
thumb_dpath = ibs.thumb_dpath
thumb_suffix = '_' + str(thumbsize) + const.CHIP_THUMB_SUFFIX
annot_uuid_list = ibs.get_annot_visual_uuids(aid_list)
thumbpath_list = [
join(thumb_dpath, str(uuid) + thumb_suffix) for uuid in annot_uuid_list
]
return thumbpath_list
@register_ibs_method
@accessor_decors.getter_1to1
# @register_api('/api/chip/thumbtup/', methods=['GET'])
def get_annot_chip_thumbtup(ibs, aid_list, thumbsize=None, config2_=None):
r"""
get chip thumb info
The return type of this is interpreted and computed in
~/code/guitool/guitool/api_thumb_delegate.py
Args:
aid_list (list):
thumbsize (int):
Returns:
list: thumbtup_list - [(thumb_path, img_path, imgsize, bboxes, thetas)]
CommandLine:
python -m wbia.control.manual_chip_funcs --test-get_annot_chip_thumbtup
RESTful:
Method: GET
URL: /api/chip/thumbtup/
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.control.manual_chip_funcs import * # NOQA
>>> import wbia
>>> ibs = wbia.opendb('testdb1')
>>> aid_list = ibs.get_valid_aids()[1:2]
>>> thumbsize = 128
>>> result = get_annot_chip_thumbtup(ibs, aid_list, thumbsize)
>>> print(result)
"""
# isiterable = isinstance(aid_list, (list, tuple, np.ndarray))
# if not isiterable:
# aid_list = [aid_list]
# HACK TO MAKE CHIPS COMPUTE
# cid_list = ibs.get _annot_chip_rowids(aid_list, ensure=True) # NOQA
# thumbsize = 256
if thumbsize is None:
thumbsize = ibs.cfg.other_cfg.thumb_size
thumb_gpaths = ibs.get_annot_chip_thumbpath(
aid_list, thumbsize=thumbsize, config2_=config2_
)
# logger.info(thumb_gpaths)
chip_paths = ibs.get_annot_chip_fpath(aid_list, ensure=True, config2_=config2_)
chipsize_list = ibs.get_annot_chip_sizes(aid_list, ensure=False, config2_=config2_)
thumbtup_list = [
(thumb_path, chip_path, chipsize, [], [], [])
for (thumb_path, chip_path, chipsize) in zip(
thumb_gpaths, chip_paths, chipsize_list
)
]
# if not isiterable:
# return thumbtup_list[0]
return thumbtup_list
@register_ibs_method
@accessor_decors.getter_1to1
def get_annot_chip_thumb_path2(ibs, aid_list, thumbsize=None, config=None):
r"""
get chip thumb info
The return type of this is interpreted and computed in
~/code/guitool/guitool/api_thumb_delegate.py
Args:
aid_list (list):
thumbsize (int):
Returns:
list: thumbtup_list - [(thumb_path, img_path, imgsize, bboxes, thetas)]
CommandLine:
python -m wbia.control.manual_chip_funcs --test-get_annot_chip_thumbtup
RESTful:
Method: GET
URL: /api/chip/thumbtup/
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.control.manual_chip_funcs import * # NOQA
>>> import wbia
>>> ibs = wbia.opendb('testdb1')
>>> aid_list = ibs.get_valid_aids()[1:2]
>>> thumbsize = 128
>>> result = get_annot_chip_thumbtup(ibs, aid_list, thumbsize)
>>> print(result)
"""
if thumbsize is not None:
config = {} if config is None else config.copy()
config['thumbsize'] = thumbsize
imgpath_list = ibs.depc_annot.get(
'chipthumb', aid_list, 'img', config=config, read_extern=False
)
return imgpath_list
@register_ibs_method
@accessor_decors.deleter
# @register_api('/api/chip/', methods=['DELETE'])
def delete_annot_chips(ibs, aid_list, config2_=None, fallback=True):
r"""
Clears annotation data (does not remove the annotation)
RESTful:
Method: DELETE
URL: /api/chip/
"""
# FIXME: Should config2_ be passed down?
# Not sure why it isn't currently
thumbpath_list = ibs.get_annot_chip_thumbpath(aid_list)
# logger.info(thumbpath_list)
# ut.remove_fpaths(thumbpath_list, quiet=quiet, lbl='chip_thumbs')
ut.remove_existing_fpaths(thumbpath_list, quiet=False, lbl='chip_thumbs')
ibs.depc_annot.delete_property('chips', aid_list, config=config2_)
# Fallback
if fallback:
ibs.depc_annot.delete_property_all('chips', aid_list)
return
@register_ibs_method
@accessor_decors.getter_1to1
# @register_api('/api/pchip/', methods=['GET'])
def get_part_chips(
ibs, part_rowid_list, config2_=None, ensure=True, verbose=False, eager=True
):
r"""
Args:
ibs (IBEISController): wbia controller object
part_rowid_list (int): list of part ids
ensure (bool): eager evaluation if True
config2_ (QueryRequest): query request object with hyper-parameters
Returns:
list: chip_list
CommandLine:
python -m wbia.control.manual_chip_funcs get_part_chips
RESTful:
Method: GET
URL: /api/pchip/
Example:
>>> # ENABLE_DOCTEST
>>> from wbia.control.manual_chip_funcs import * # NOQA
>>> import wbia
>>> ibs = wbia.opendb('testdb1')
>>> aid_list = ibs.get_valid_aids()
>>> aid_list = aid_list[:10]
>>> bbox_list = ibs.get_annot_bboxes(aid_list)
>>> bbox_list = [
>>> (xtl + 100, ytl + 100, w - 100, h - 100)
>>> for xtl, ytl, w, h in bbox_list
>>> ]
>>> part_rowid_list = ibs.add_parts(aid_list, bbox_list=bbox_list)
>>> config2_ = {'dim_size': 450, 'resize_dim': 'area'}
>>> chip_list = get_part_chips(ibs, part_rowid_list, config2_)
>>> chip_sum_list = [chip.sum() for chip in chip_list]
>>> target = [86765003, 62005000, 61333186, 111424764, 63590900, 51397198, 139395045, 84100000, 41254190, 89657450]
>>> ut.assert_almost_eq(chip_sum_list, target, 50000)
>>> print(chip_sum_list)
"""
return ibs.depc_part.get(
'pchips', part_rowid_list, 'img', config=config2_, ensure=ensure
)
@register_ibs_method
@accessor_decors.deleter
# @register_api('/api/chip/', methods=['DELETE'])
def delete_part_chips(ibs, part_rowid_list, config2_=None):
r"""
Clears part data
RESTful:
Method: DELETE
URL: /api/pchip/
"""
ibs.depc_part.delete_property('pchips', part_rowid_list, config=config2_)
return
def testdata_ibs():
r""""""
import wbia
ibs = wbia.opendb('testdb1')
config2_ = None
return ibs, config2_
|
PypiClean
|
/PolicyEngine-UK-0.55.2.tar.gz/PolicyEngine-UK-0.55.2/policyengine_uk/reforms/cps/marriage_tax_reforms.py
|
from policyengine_uk.model_api import *
from typing import Union, Optional
def create_expanded_ma_reform(
max_child_age: Optional[int] = None,
child_education_levels: Optional[List[str]] = None,
) -> Reform:
class meets_expanded_ma_conditions(Variable):
label = "Qualifies for an expanded Marriage Allowance"
entity = Person
definition_period = YEAR
value_type = bool
def formula(person, period):
# There is a child who either meets the age condition or the education condition
benunit = person.benunit
if max_child_age is not None:
child_meets_age_condition = (
person("age", period) <= max_child_age
)
return benunit.any(child_meets_age_condition)
if child_education_levels is not None:
child_meets_education_condition = np.isin(
person("education_level", period).decode_to_str(),
child_education_levels,
)
return benunit.any(child_meets_education_condition)
return True
class meets_marriage_allowance_income_conditions(Variable):
label = "Meets Marriage Allowance income conditions"
documentation = "Whether this person (and their partner) meets the conditions for this person to be eligible for the Marriage Allowance, as set out in the Income Tax Act 2007 sections 55B and 55C"
entity = Person
definition_period = YEAR
value_type = bool
reference = "https://www.legislation.gov.uk/ukpga/2007/3/section/55B"
def formula(person, period):
band = person("tax_band", period)
bands = band.possible_values
return (
(band == bands.BASIC)
| (band == bands.STARTER)
| (band == bands.INTERMEDIATE)
| ( # Expand to higher bands if reform conditions are met.
person("meets_expanded_ma_conditions", period)
& ((band == bands.HIGHER) | (band == bands.ADDITIONAL))
)
)
class marriage_allowance(Variable):
value_type = float
entity = Person
label = "Marriage Allowance"
definition_period = YEAR
reference = (
"https://www.legislation.gov.uk/ukpga/2007/3/part/3/chapter/3A"
)
unit = GBP
def formula(person, period, parameters):
marital = person("marital_status", period)
married = marital == marital.possible_values.MARRIED
eligible = married & person(
"meets_marriage_allowance_income_conditions", period
)
transferable_amount = person(
"partners_unused_personal_allowance", period
)
allowances = parameters(period).gov.hmrc.income_tax.allowances
takeup_rate = allowances.marriage_allowance.takeup_rate
capped_percentage = allowances.marriage_allowance.max
expanded_ma_cap = parameters(
period
).gov.contrib.cps.marriage_tax_reforms.expanded_ma.ma_rate
capped_percentage = where(
person("meets_expanded_ma_conditions", period),
expanded_ma_cap,
capped_percentage,
)
max_amount = (
allowances.personal_allowance.amount * capped_percentage
)
amount_if_eligible_pre_rounding = min_(
transferable_amount, max_amount
)
# Round up.
rounding_increment = (
allowances.marriage_allowance.rounding_increment
)
amount_if_eligible = (
np.ceil(amount_if_eligible_pre_rounding / rounding_increment)
* rounding_increment
)
takes_up = random(person) < takeup_rate
return eligible * amount_if_eligible * takes_up
class reform(Reform):
def apply(self):
self.add_variable(meets_expanded_ma_conditions)
self.update_variable(meets_marriage_allowance_income_conditions)
self.update_variable(marriage_allowance)
return reform
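# Editor-added sketch: applying the expanded-MA reform in a microsimulation. It assumes
# policyengine_uk exposes a `Microsimulation` class accepting a `reform` argument (the
# usual PolicyEngine pattern); the year and child-age cutoff are illustrative.
def _example_expanded_ma_cost(year: int = 2024) -> float:
    from policyengine_uk import Microsimulation
    reform = create_expanded_ma_reform(max_child_age=5)
    baseline = Microsimulation()
    reformed = Microsimulation(reform=reform)
    # Revenue cost of the expansion = drop in aggregate income tax.
    return (
        baseline.calculate("income_tax", year).sum()
        - reformed.calculate("income_tax", year).sum()
    )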
def create_marriage_neutral_income_tax_reform(
max_child_age: Optional[int] = None,
child_education_levels: Optional[List[str]] = None,
) -> Reform:
class meets_ma_neutral_tax_conditions(Variable):
label = "Qualifies for an expanded Marriage Allowance"
entity = Person
definition_period = YEAR
value_type = bool
def formula(person, period):
# There is a child who either meets the age condition or the education condition
benunit = person.benunit
if max_child_age is not None:
child_meets_age_condition = (
person("age", period) <= max_child_age
)
return benunit.any(child_meets_age_condition)
if child_education_levels is not None:
                child_meets_education_condition = np.isin(
person("education_level", period).decode_to_str(),
child_education_levels,
)
return benunit.any(child_meets_education_condition)
return True
class unadjusted_net_income(Variable):
value_type = float
entity = Person
label = "Taxable income after tax reliefs and before allowances"
definition_period = YEAR
reference = "Income Tax Act 2007 s. 23"
unit = GBP
def formula(person, period, parameters):
COMPONENTS = [
"taxable_employment_income",
"taxable_pension_income",
"taxable_social_security_income",
"taxable_self_employment_income",
"taxable_property_income",
"taxable_savings_interest_income",
"taxable_dividend_income",
"taxable_miscellaneous_income",
]
if parameters(
period
).gov.contrib.ubi_center.basic_income.interactions.include_in_taxable_income:
COMPONENTS.append("basic_income")
return max_(0, add(person, period, COMPONENTS))
class adjusted_net_income(Variable):
label = "Optimised adjusted net income"
documentation = "Adjusted net income, but split equally between partners if they are married"
entity = Person
definition_period = YEAR
value_type = float
def formula(person, period, parameters):
income = person("unadjusted_net_income", period)
is_adult = person("is_adult", period)
total_income = person.benunit.sum(is_adult * income)
has_spouse = person.benunit("is_married", period) & is_adult
originally_split_income_branch = person.simulation.get_branch(
"originally_split_income", clone_system=True
)
originally_split_income_branch.set_input(
"adjusted_net_income", period, income
)
originally_split_income_tax = person.benunit.sum(
originally_split_income_branch.calculate("income_tax", period)
)
split_income = where(
has_spouse & person("meets_ma_neutral_tax_conditions", period),
total_income / 2,
income,
)
split_income_branch = person.simulation.get_branch(
"split_income", clone_system=True
)
split_income_branch.set_input(
"adjusted_net_income", period, split_income
)
split_income_tax = person.benunit.sum(
split_income_branch.calculate("income_tax", period)
)
return where(
split_income_tax <= originally_split_income_tax,
split_income,
income,
)
class reform(Reform):
def apply(self):
self.add_variable(meets_ma_neutral_tax_conditions)
self.update_variable(adjusted_net_income)
self.add_variable(unadjusted_net_income)
return reform
def create_marriage_tax_reform(parameters, period):
cps = parameters(period).gov.contrib.cps.marriage_tax_reforms
remove_income_condition = cps.expanded_ma.remove_income_condition
rate = cps.expanded_ma.ma_rate
original_rate = parameters(
period
).gov.hmrc.income_tax.allowances.marriage_allowance.max
ma_max_child_age = cps.expanded_ma.max_child_age
neutralise_income_tax = cps.marriage_neutral_it.neutralise_income_tax
it_max_child_age = cps.marriage_neutral_it.max_child_age
if remove_income_condition or rate != original_rate:
ma_reform = create_expanded_ma_reform(
max_child_age=ma_max_child_age if ma_max_child_age > 0 else None,
)
else:
ma_reform = None
if neutralise_income_tax:
it_reform = create_marriage_neutral_income_tax_reform(
max_child_age=it_max_child_age if it_max_child_age > 0 else None,
)
else:
it_reform = None
if ma_reform is not None:
if it_reform is not None:
return ma_reform, it_reform
else:
return ma_reform
else:
if it_reform is not None:
return it_reform
else:
return None
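# Editor-added helper sketch (not part of the original module): the factory above may
# return None, a single Reform, or a pair of Reforms, so a caller may want a flat list.
def _as_reform_list(parameters, period) -> list:
    reforms = create_marriage_tax_reform(parameters, period)
    if reforms is None:
        return []
    if isinstance(reforms, tuple):
        return list(reforms)
    return [reforms]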
|
PypiClean
|
/fds.sdk.FactSetPrices-1.1.6-py3-none-any.whl/fds/sdk/FactSetPrices/model/splits.py
|
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.FactSetPrices.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.FactSetPrices.exceptions import ApiAttributeError
class Splits(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'fsym_id': (str, none_type,), # noqa: E501
'date': (date, none_type,), # noqa: E501
'split_factor': (float, none_type,), # noqa: E501
'split_comment': (str, none_type,), # noqa: E501
'request_id': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'fsym_id': 'fsymId', # noqa: E501
'date': 'date', # noqa: E501
'split_factor': 'splitFactor', # noqa: E501
'split_comment': 'splitComment', # noqa: E501
'request_id': 'requestId', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Splits - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
fsym_id (str, none_type): Factset Regional Security Identifier. Six alpha-numeric characters, excluding vowels, with an -R suffix (XXXXXX-R). Identifies the security's best regional security data series per currency. For equities, all primary listings per region and currency are allocated a regional-level permanent identifier. The regional-level permanent identifier will be available once a SEDOL representing the region/currency has been allocated and the identifiers are on FactSet.. [optional] # noqa: E501
date (date, none_type): Ex-Date of the split expressed in YYYY-MM-DD format.. [optional] # noqa: E501
split_factor (float, none_type): Split adjustment factor for n splits ago. A 2-for-1 split returns .50, the number you would multiply the stock price by to adjust for the split.. [optional] # noqa: E501
split_comment (str, none_type): Description for the type of split or spin off.. [optional] # noqa: E501
request_id (str): Identifier that was used for the request.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Splits - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
fsym_id (str, none_type): Factset Regional Security Identifier. Six alpha-numeric characters, excluding vowels, with an -R suffix (XXXXXX-R). Identifies the security's best regional security data series per currency. For equities, all primary listings per region and currency are allocated a regional-level permanent identifier. The regional-level permanent identifier will be available once a SEDOL representing the region/currency has been allocated and the identifiers are on FactSet.. [optional] # noqa: E501
date (date, none_type): Ex-Date of the split expressed in YYYY-MM-DD format.. [optional] # noqa: E501
split_factor (float, none_type): Split adjustment factor for n splits ago. A 2-for-1 split returns .50, the number you would multiply the stock price by to adjust for the split.. [optional] # noqa: E501
split_comment (str, none_type): Description for the type of split or spin off.. [optional] # noqa: E501
request_id (str): Identifier that was used for the request.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/zidiscord.py-1.7.3.3.tar.gz/zidiscord.py-1.7.3.3/discord/opus.py
|
import array
import ctypes
import ctypes.util
import logging
import math
import os.path
import struct
import sys
from .errors import DiscordException
log = logging.getLogger(__name__)
c_int_ptr = ctypes.POINTER(ctypes.c_int)
c_int16_ptr = ctypes.POINTER(ctypes.c_int16)
c_float_ptr = ctypes.POINTER(ctypes.c_float)
_lib = None
class EncoderStruct(ctypes.Structure):
pass
class DecoderStruct(ctypes.Structure):
pass
EncoderStructPtr = ctypes.POINTER(EncoderStruct)
DecoderStructPtr = ctypes.POINTER(DecoderStruct)
## Some constants from opus_defines.h
# Error codes
OK = 0
BAD_ARG = -1
# Encoder CTLs
APPLICATION_AUDIO = 2049
APPLICATION_VOIP = 2048
APPLICATION_LOWDELAY = 2051
CTL_SET_BITRATE = 4002
CTL_SET_BANDWIDTH = 4008
CTL_SET_FEC = 4012
CTL_SET_PLP = 4014
CTL_SET_SIGNAL = 4024
# Decoder CTLs
CTL_SET_GAIN = 4034
CTL_LAST_PACKET_DURATION = 4039
band_ctl = {
'narrow': 1101,
'medium': 1102,
'wide': 1103,
'superwide': 1104,
'full': 1105,
}
signal_ctl = {
'auto': -1000,
'voice': 3001,
'music': 3002,
}
def _err_lt(result, func, args):
if result < OK:
log.info('error has happened in %s', func.__name__)
raise OpusError(result)
return result
def _err_ne(result, func, args):
ret = args[-1]._obj
if ret.value != OK:
log.info('error has happened in %s', func.__name__)
raise OpusError(ret.value)
return result
# A list of exported functions.
# The first argument is obviously the name.
# The second one are the types of arguments it takes.
# The third is the result type.
# The fourth is the error handler.
exported_functions = [
# Generic
('opus_get_version_string',
None, ctypes.c_char_p, None),
('opus_strerror',
[ctypes.c_int], ctypes.c_char_p, None),
# Encoder functions
('opus_encoder_get_size',
[ctypes.c_int], ctypes.c_int, None),
('opus_encoder_create',
[ctypes.c_int, ctypes.c_int, ctypes.c_int, c_int_ptr], EncoderStructPtr, _err_ne),
('opus_encode',
[EncoderStructPtr, c_int16_ptr, ctypes.c_int, ctypes.c_char_p, ctypes.c_int32], ctypes.c_int32, _err_lt),
('opus_encode_float',
[EncoderStructPtr, c_float_ptr, ctypes.c_int, ctypes.c_char_p, ctypes.c_int32], ctypes.c_int32, _err_lt),
('opus_encoder_ctl',
None, ctypes.c_int32, _err_lt),
('opus_encoder_destroy',
[EncoderStructPtr], None, None),
# Decoder functions
('opus_decoder_get_size',
[ctypes.c_int], ctypes.c_int, None),
('opus_decoder_create',
[ctypes.c_int, ctypes.c_int, c_int_ptr], DecoderStructPtr, _err_ne),
('opus_decode',
[DecoderStructPtr, ctypes.c_char_p, ctypes.c_int32, c_int16_ptr, ctypes.c_int, ctypes.c_int],
ctypes.c_int, _err_lt),
('opus_decode_float',
[DecoderStructPtr, ctypes.c_char_p, ctypes.c_int32, c_float_ptr, ctypes.c_int, ctypes.c_int],
ctypes.c_int, _err_lt),
('opus_decoder_ctl',
None, ctypes.c_int32, _err_lt),
('opus_decoder_destroy',
[DecoderStructPtr], None, None),
('opus_decoder_get_nb_samples',
[DecoderStructPtr, ctypes.c_char_p, ctypes.c_int32], ctypes.c_int, _err_lt),
# Packet functions
('opus_packet_get_bandwidth',
[ctypes.c_char_p], ctypes.c_int, _err_lt),
('opus_packet_get_nb_channels',
[ctypes.c_char_p], ctypes.c_int, _err_lt),
('opus_packet_get_nb_frames',
[ctypes.c_char_p, ctypes.c_int], ctypes.c_int, _err_lt),
('opus_packet_get_samples_per_frame',
[ctypes.c_char_p, ctypes.c_int], ctypes.c_int, _err_lt),
]
def libopus_loader(name):
# create the library...
lib = ctypes.cdll.LoadLibrary(name)
# register the functions...
for item in exported_functions:
func = getattr(lib, item[0])
try:
if item[1]:
func.argtypes = item[1]
func.restype = item[2]
except KeyError:
pass
try:
if item[3]:
func.errcheck = item[3]
except KeyError:
log.exception("Error assigning check function to %s", func)
return lib
def _load_default():
global _lib
try:
if sys.platform == 'win32':
_basedir = os.path.dirname(os.path.abspath(__file__))
_bitness = struct.calcsize('P') * 8
_target = 'x64' if _bitness > 32 else 'x86'
_filename = os.path.join(_basedir, 'bin', 'libopus-0.{}.dll'.format(_target))
_lib = libopus_loader(_filename)
else:
_lib = libopus_loader(ctypes.util.find_library('opus'))
except Exception:
_lib = None
return _lib is not None
def load_opus(name):
"""Loads the libopus shared library for use with voice.
If this function is not called then the library uses the function
:func:`ctypes.util.find_library` and then loads that one if available.
Not loading a library and attempting to use PCM based AudioSources will
lead to voice not working.
This function propagates the exceptions thrown.
.. warning::
The bitness of the library must match the bitness of your python
interpreter. If the library is 64-bit then your python interpreter
must be 64-bit as well. Usually if there's a mismatch in bitness then
the load will throw an exception.
.. note::
On Windows, this function should not need to be called as the binaries
are automatically loaded.
.. note::
On Windows, the .dll extension is not necessary. However, on Linux
the full extension is required to load the library, e.g. ``libopus.so.1``.
On Linux however, :func:`ctypes.util.find_library` will usually find the library automatically
without you having to call this.
Parameters
----------
name: :class:`str`
The filename of the shared library.
"""
global _lib
_lib = libopus_loader(name)
def is_loaded():
"""Function to check if opus lib is successfully loaded either
    via the :func:`ctypes.util.find_library` call or :func:`load_opus`.
This must return ``True`` for voice to work.
Returns
-------
:class:`bool`
Indicates if the opus library has been loaded.
"""
global _lib
return _lib is not None
class OpusError(DiscordException):
"""An exception that is thrown for libopus related errors.
Attributes
----------
code: :class:`int`
The error code returned.
"""
def __init__(self, code):
self.code = code
msg = _lib.opus_strerror(self.code).decode('utf-8')
log.info('"%s" has happened', msg)
super().__init__(msg)
class OpusNotLoaded(DiscordException):
"""An exception that is thrown for when libopus is not loaded."""
pass
class _OpusStruct:
SAMPLING_RATE = 48000
CHANNELS = 2
FRAME_LENGTH = 20 # in milliseconds
SAMPLE_SIZE = struct.calcsize('h') * CHANNELS
SAMPLES_PER_FRAME = int(SAMPLING_RATE / 1000 * FRAME_LENGTH)
FRAME_SIZE = SAMPLES_PER_FRAME * SAMPLE_SIZE
@staticmethod
def get_opus_version() -> str:
if not is_loaded() and not _load_default():
raise OpusNotLoaded()
return _lib.opus_get_version_string().decode('utf-8')
class Encoder(_OpusStruct):
def __init__(self, application=APPLICATION_AUDIO):
_OpusStruct.get_opus_version()
self.application = application
self._state = self._create_state()
self.set_bitrate(128)
self.set_fec(True)
self.set_expected_packet_loss_percent(0.15)
self.set_bandwidth('full')
self.set_signal_type('auto')
def __del__(self):
if hasattr(self, '_state'):
_lib.opus_encoder_destroy(self._state)
self._state = None
def _create_state(self):
ret = ctypes.c_int()
return _lib.opus_encoder_create(self.SAMPLING_RATE, self.CHANNELS, self.application, ctypes.byref(ret))
def set_bitrate(self, kbps):
kbps = min(512, max(16, int(kbps)))
_lib.opus_encoder_ctl(self._state, CTL_SET_BITRATE, kbps * 1024)
return kbps
def set_bandwidth(self, req):
if req not in band_ctl:
raise KeyError('%r is not a valid bandwidth setting. Try one of: %s' % (req, ','.join(band_ctl)))
k = band_ctl[req]
_lib.opus_encoder_ctl(self._state, CTL_SET_BANDWIDTH, k)
def set_signal_type(self, req):
if req not in signal_ctl:
raise KeyError('%r is not a valid signal setting. Try one of: %s' % (req, ','.join(signal_ctl)))
k = signal_ctl[req]
_lib.opus_encoder_ctl(self._state, CTL_SET_SIGNAL, k)
def set_fec(self, enabled=True):
_lib.opus_encoder_ctl(self._state, CTL_SET_FEC, 1 if enabled else 0)
def set_expected_packet_loss_percent(self, percentage):
_lib.opus_encoder_ctl(self._state, CTL_SET_PLP, min(100, max(0, int(percentage * 100))))
def encode(self, pcm, frame_size):
max_data_bytes = len(pcm)
pcm = ctypes.cast(pcm, c_int16_ptr)
data = (ctypes.c_char * max_data_bytes)()
ret = _lib.opus_encode(self._state, pcm, frame_size, data, max_data_bytes)
return array.array('b', data[:ret]).tobytes()
class Decoder(_OpusStruct):
def __init__(self):
_OpusStruct.get_opus_version()
self._state = self._create_state()
def __del__(self):
if hasattr(self, '_state'):
_lib.opus_decoder_destroy(self._state)
self._state = None
def _create_state(self):
ret = ctypes.c_int()
return _lib.opus_decoder_create(self.SAMPLING_RATE, self.CHANNELS, ctypes.byref(ret))
@staticmethod
def packet_get_nb_frames(data):
"""Gets the number of frames in an Opus packet"""
return _lib.opus_packet_get_nb_frames(data, len(data))
@staticmethod
def packet_get_nb_channels(data):
"""Gets the number of channels in an Opus packet"""
return _lib.opus_packet_get_nb_channels(data)
@classmethod
def packet_get_samples_per_frame(cls, data):
"""Gets the number of samples per frame from an Opus packet"""
return _lib.opus_packet_get_samples_per_frame(data, cls.SAMPLING_RATE)
def _set_gain(self, adjustment):
"""Configures decoder gain adjustment.
Scales the decoded output by a factor specified in Q8 dB units.
This has a maximum range of -32768 to 32767 inclusive, and returns
OPUS_BAD_ARG (-1) otherwise. The default is zero indicating no adjustment.
This setting survives decoder reset (irrelevant for now).
gain = 10**x/(20.0*256)
(from opus_defines.h)
"""
return _lib.opus_decoder_ctl(self._state, CTL_SET_GAIN, adjustment)
def set_gain(self, dB):
"""Sets the decoder gain in dB, from -128 to 128."""
dB_Q8 = max(-32768, min(32767, round(dB * 256))) # dB * 2^n where n is 8 (Q8)
return self._set_gain(dB_Q8)
def set_volume(self, mult):
"""Sets the output volume as a float percent, i.e. 0.5 for 50%, 1.75 for 175%, etc."""
return self.set_gain(20 * math.log10(mult)) # amplitude ratio
def _get_last_packet_duration(self):
"""Gets the duration (in samples) of the last packet successfully decoded or concealed."""
ret = ctypes.c_int32()
_lib.opus_decoder_ctl(self._state, CTL_LAST_PACKET_DURATION, ctypes.byref(ret))
return ret.value
def decode(self, data, *, fec=False):
if data is None and fec:
raise OpusError("Invalid arguments: FEC cannot be used with null data")
if data is None:
frame_size = self._get_last_packet_duration() or self.SAMPLES_PER_FRAME
channel_count = self.CHANNELS
else:
frames = self.packet_get_nb_frames(data)
channel_count = self.packet_get_nb_channels(data)
samples_per_frame = self.packet_get_samples_per_frame(data)
frame_size = frames * samples_per_frame
pcm = (ctypes.c_int16 * (frame_size * channel_count))()
pcm_ptr = ctypes.cast(pcm, c_int16_ptr)
ret = _lib.opus_decode(self._state, data, len(data) if data else 0, pcm_ptr, frame_size, fec)
return array.array('h', pcm[:ret * channel_count]).tobytes()
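# Editor-added sketch: a minimal encode pass. It requires a libopus shared library to be
# installed and discoverable (otherwise call load_opus() with an explicit path first);
# the silent PCM frame below is purely illustrative.
if __name__ == '__main__':
    print(_OpusStruct.get_opus_version())
    encoder = Encoder()
    pcm_frame = b'\x00' * Encoder.FRAME_SIZE  # one 20 ms frame of 16-bit stereo PCM
    packet = encoder.encode(pcm_frame, Encoder.SAMPLES_PER_FRAME)
    print('encoded %d PCM bytes into a %d byte opus packet' % (len(pcm_frame), len(packet)))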
|
PypiClean
|
/pydonicli-0.2.5.tar.gz/pydonicli-0.2.5/src/commands/opsys/du_by_filetype.py
|
import click
import pydoni
import pydoni.opsys
from cli_reference.common import Verbose
@click.option('-d', '--dpath', type=click.Path(exists=True), required=True,
help='Full path to target directory.')
@click.option('-o', '--output-fpath', type=click.Path(), default=None,
help='If specified, write program output to this file.')
@click.option('-r', '--recursive', is_flag=True, default=False,
help='Scan recursively and iterate down the directory tree.')
@click.option('-h', '--human-readable', is_flag=True, default=False,
help='Display filesize in output in human-readable format')
@click.option('-p', '--progress', is_flag=True, default=False,
help='Display progress bar while scanning directory')
@click.option('-v', '--verbose', is_flag=True, default=False,
help='Print messages to console.')
@click.command()
def du_by_filetype(dpath, output_fpath, recursive, human_readable, progress, verbose):
"""
List the total filesize in a directory by file type.
"""
args, result = pydoni.pydonicli_declare_args(locals()), dict()
pydoni.pydonicli_register({'command_name': pydoni.what_is_my_name(with_modname=True), 'args': args})
vb = Verbose(verbose)
filesize_dct = pydoni.opsys.du_by_filetype(dpath=dpath,
recursive=recursive,
human_readable=human_readable,
progress=progress,
verbose=verbose)
for ftype, fsize in filesize_dct.items():
print(f'{ftype}: {fsize}')
if isinstance(output_fpath, str):
with open(output_fpath, 'a') as f:
write_lst = []
write_lst.append(f'Directory: "{dpath}"\n')
for ftype, fsize in filesize_dct.items():
write_lst.append(f'{ftype}: {fsize}\n')
f.write(''.join(write_lst).strip())
result['result'] = filesize_dct
pydoni.pydonicli_register({k: v for k, v in locals().items() if k in ['result']})
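# Editor-added sketch: exercising the command in-process with Click's test runner
# (the target directory here is illustrative).
def _example_invoke_du_by_filetype():
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(
        du_by_filetype,
        ['--dpath', '/tmp', '--recursive', '--human-readable', '--verbose'],
    )
    return result.output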
|
PypiClean
|
/PyScaffold-4.5.tar.gz/PyScaffold-4.5/src/pyscaffold/structure.py
|
from copy import deepcopy
from pathlib import Path
from string import Template
from typing import Callable, Dict, Optional, Tuple, Union, cast
from . import templates
from .file_system import PathLike, create_directory
from .operations import (
FileContents,
FileOp,
ScaffoldOpts,
create,
no_overwrite,
skip_on_update,
)
from .templates import get_template
NO_OVERWRITE = no_overwrite()
SKIP_ON_UPDATE = skip_on_update()
# Sphinx is bad at documenting aliases for the time being... so we repeat the definition
AbstractContent = Union[FileContents, Callable[[ScaffoldOpts], FileContents], Template]
"""*Recipe* for obtaining file contents
::
Union[FileContents, Callable[[ScaffoldOpts], FileContents], Template]
"""
ResolvedLeaf = Tuple[AbstractContent, FileOp]
"""Complete *recipe* for manipulating a file in disk (not only its contents but also the
file operation)::
Tuple[AbstractContent, FileOp]
"""
ReifiedLeaf = Tuple[FileContents, FileOp]
"""Similar to :obj:`ResolvedLeaf` but with file contents "reified", i.e. an actual
string instead of a "lazy object" (such as a function or template).
"""
Leaf = Union[AbstractContent, ResolvedLeaf]
"""Just the content of the file OR a tuple of content + file operation
::
Union[AbstractContent, ResolvedLeaf]
"""
# TODO: Replace `dict` when recursive types are processed by mypy
Node = Union[Leaf, dict]
"""Representation of a *file system node* in the project structure (e.g. files,
directories)::
Union[Leaf, Structure]
"""
Structure = Dict[str, Node]
"""The directory tree represented as a (possibly nested) dictionary::
Structure = Dict[str, Node]
Node = Union[Leaf, Structure]
The keys indicate the path where a file will be written, while the
value indicates the content.
A nested dictionary represent a nested directory, while :obj:`str`,
:obj:`string.Template` and :obj:`callable` values represent a file to be created.
:obj:`tuple` values are also allowed, and in that case, the first element of the tuple
represents the file content while the second element is a :mod:`pyscaffold.operations
<file operation>` (which can be seen as a recipe on how to create a file with the given
content). :obj:`Callable <callable>` file contents are transformed into strings by
calling them with :obj:`PyScaffold's option dict as argument
<pyscaffold.api.create_structure>`. Similarly, :obj:`string.Template.safe_substitute`
are called with PyScaffold's opts.
The top level keys in the dict are file/dir names relative to the project root, while
keys in nested dicts are relative to the parent's key/location.
For example::
from pyscaffold.operations import no_overwrite
struct: Structure = {
'namespace': {
'module.py': ('print("Hello World!")', no_overwrite())
}
}
represents a ``namespace/module.py`` file inside the project folder
with content ``print("Hello World!")``, that will be created only if not
present.
Note:
:obj:`None` file contents are ignored and not created in disk.
"""
ActionParams = Tuple[Structure, ScaffoldOpts]
"""See :obj:`pyscaffold.actions.ActionParams`"""
# -------- PyScaffold Actions --------
def define_structure(struct: Structure, opts: ScaffoldOpts) -> ActionParams:
"""Creates the project structure as dictionary of dictionaries
Args:
        struct : previous directory structure (usually an empty dict)
opts: options of the project
Returns:
Project structure and PyScaffold's options
.. versionchanged:: 4.0
:obj:`string.Template` and functions added directly to the file structure.
"""
files: Structure = {
# Tools
".gitignore": (get_template("gitignore"), NO_OVERWRITE),
".coveragerc": (get_template("coveragerc"), NO_OVERWRITE),
".readthedocs.yml": (get_template("rtd_cfg"), NO_OVERWRITE),
# Project configuration
"pyproject.toml": (templates.pyproject_toml, NO_OVERWRITE),
"setup.py": get_template("setup_py"),
"setup.cfg": (templates.setup_cfg, NO_OVERWRITE),
"tox.ini": (get_template("tox_ini"), NO_OVERWRITE),
# Essential docs
"README.rst": (get_template("readme"), NO_OVERWRITE),
"AUTHORS.rst": (get_template("authors"), NO_OVERWRITE),
"LICENSE.txt": (templates.license, NO_OVERWRITE),
"CHANGELOG.rst": (get_template("changelog"), NO_OVERWRITE),
"CONTRIBUTING.rst": (get_template("contributing"), NO_OVERWRITE),
# Code
"src": {
opts["package"]: {
"__init__.py": templates.init,
"skeleton.py": (get_template("skeleton"), SKIP_ON_UPDATE),
}
},
# Tests
"tests": {
"conftest.py": (get_template("conftest_py"), NO_OVERWRITE),
"test_skeleton.py": (get_template("test_skeleton"), SKIP_ON_UPDATE),
},
# Remaining of the Documentation
"docs": {
"conf.py": get_template("sphinx_conf"),
"authors.rst": get_template("sphinx_authors"),
"contributing.rst": get_template("sphinx_contributing"),
"index.rst": (get_template("sphinx_index"), NO_OVERWRITE),
"readme.rst": get_template("sphinx_readme"),
"license.rst": get_template("sphinx_license"),
"changelog.rst": get_template("sphinx_changelog"),
"Makefile": get_template("sphinx_makefile"),
"_static": {".gitignore": get_template("gitignore_empty")},
"requirements.txt": (get_template("rtd_requirements"), NO_OVERWRITE),
},
}
return merge(struct, files), opts
def create_structure(
struct: Structure, opts: ScaffoldOpts, prefix: Optional[Path] = None
) -> ActionParams:
"""Manifests/reifies a directory structure in the filesystem
Args:
struct: directory structure as dictionary of dictionaries
opts: options of the project
prefix: prefix path for the structure
Returns:
Directory structure as dictionary of dictionaries (similar to input, but only
containing the files that actually changed) and input options
Raises:
TypeError: raised if content type in struct is unknown
.. versionchanged:: 4.0
Also accepts :obj:`string.Template` and :obj:`callable` objects as file contents.
"""
update = opts.get("update") or opts.get("force")
pretend = opts.get("pretend")
if prefix is None:
prefix = cast(Path, opts.get("project_path", "."))
create_directory(prefix, update, pretend)
prefix = Path(prefix)
changed: Structure = {}
for name, node in struct.items():
path = prefix / name
if isinstance(node, dict):
create_directory(path, update, pretend)
changed[name], _ = create_structure(node, opts, prefix=path)
else:
content, file_op = reify_leaf(node, opts)
if file_op(path, content, opts):
changed[name] = content
return changed, opts
# -------- Auxiliary Functions --------
def resolve_leaf(contents: Leaf) -> ResolvedLeaf:
"""Normalize project structure leaf to be a ``Tuple[AbstractContent, FileOp]``"""
if isinstance(contents, tuple):
return contents
return (contents, create)
def reify_content(content: AbstractContent, opts: ScaffoldOpts) -> FileContents:
"""Make sure content is string (calling :meth:`~object.__call__` or
:meth:`~string.Template.safe_substitute` with opts if necessary)
"""
if callable(content):
return content(opts)
if isinstance(content, Template):
return content.safe_substitute(opts)
return content
def reify_leaf(contents: Leaf, opts: ScaffoldOpts) -> ReifiedLeaf:
"""Similar to :obj:`resolve_leaf` but applies :obj:`reify_content` to the first
element of the returned tuple.
"""
file_contents, action = resolve_leaf(contents)
return (reify_content(file_contents, opts), action)
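def _example_reify_leaf():
    # Editor-added sketch (not part of PyScaffold): a leaf recipe collapsing to text.
    opts = {"author": "Jane"}
    content, file_op = reify_leaf((Template("Hello $author!"), create), opts)
    # content == "Hello Jane!" and file_op is create
    return content, file_op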
# -------- Structure Manipulation --------
def modify(
struct: Structure,
path: PathLike,
modifier: Callable[[AbstractContent, FileOp], ResolvedLeaf],
) -> Structure:
"""Modify the contents of a file in the representation of the project tree.
If the given path does not exist, the parent directories are automatically
created.
Args:
struct: project representation as (possibly) nested dict. See :obj:`~.merge`.
path: path-like string or object relative to the structure root.
The following examples are equivalent::
from pathlib import Path
'docs/api/index.html'
Path('docs', 'api', 'index.html')
.. versionchanged:: 4.0
The function no longer accepts a list of strings of path parts.
modifier: function (or callable object) that receives the
old content and the old file operation as arguments and returns
a tuple with the new content and new file operation.
Note that, if the file does not exist in ``struct``, ``None`` will
be passed as argument. Example::
                modifier = lambda old, op: ((old or '') + 'APPENDED CONTENT!', op)
modifier = lambda old, op: ('PREPENDED CONTENT!' + (old or ''), op)
.. versionchanged:: 4.0
``modifier`` requires 2 arguments and now is a mandatory argument.
.. versionchanged:: 4.0
``update_rule`` is no longer an argument. Instead the arity ``modifier`` was
changed to accept 2 arguments instead of only 1. This is more suitable to
handling the new :obj:`pyscaffold.operations` API.
Returns:
Updated project tree representation
Note:
Use an empty string as content to ensure a file is created empty
(``None`` contents will not be created).
"""
# Retrieve a list of parts from a path-like object
path_parts = Path(path).parts
# Walk the entire path, creating parents if necessary.
root = deepcopy(struct)
last_parent: dict = root
name = path_parts[-1]
for parent in path_parts[:-1]:
last_parent = last_parent.setdefault(parent, {})
# Get the old value if existent.
old_value = resolve_leaf(last_parent.get(name))
# Update the value.
new_value = modifier(*old_value)
last_parent[name] = _merge_leaf(old_value, new_value)
return root
def ensure(
struct: Structure,
path: PathLike,
content: AbstractContent = None,
file_op: FileOp = create,
) -> Structure:
"""Ensure a file exists in the representation of the project tree
with the provided content.
All the parent directories are automatically created.
Args:
struct: project representation as (possibly) nested.
path: path-like string or object relative to the structure root.
See :obj:`~.modify`.
.. versionchanged:: 4.0
The function no longer accepts a list of strings of path parts.
content: file text contents, ``None`` by default.
The old content is preserved if ``None``.
        file_op: see :obj:`pyscaffold.operations`, :obj:`~.create` by default.
.. versionchanged:: 4.0
            Instead of an ``update_rule`` flag, the function now accepts a :obj:`file_op
            <pyscaffold.operations.FileOp>`.
Returns:
Updated project tree representation
Note:
Use an empty string as content to ensure a file is created empty.
"""
return modify(
struct, path, lambda old, _: (old if content is None else content, file_op)
)
def reject(struct: Structure, path: PathLike) -> Structure:
"""Remove a file from the project tree representation if existent.
Args:
struct: project representation as (possibly) nested.
path: path-like string or object relative to the structure root.
See :obj:`~.modify`.
.. versionchanged:: 4.0
The function no longer accepts a list of strings of path parts.
Returns:
Modified project tree representation
"""
# Retrieve a list of parts from a path-like object
path_parts = Path(path).parts
# Walk the entire path, creating parents if necessary.
root = deepcopy(struct)
last_parent: dict = root
name = path_parts[-1]
for parent in path_parts[:-1]:
if parent not in last_parent:
return root # one ancestor already does not exist, do nothing
last_parent = last_parent[parent]
if name in last_parent:
del last_parent[name]
return root
def merge(old: Structure, new: Structure) -> Structure:
"""Merge two dict representations for the directory structure.
Basically a deep dictionary merge, except from the leaf update method.
Args:
old: directory descriptor that takes low precedence during the merge.
new: directory descriptor that takes high precedence during the merge.
.. versionchanged:: 4.0
Project structure now considers everything **under** the
top level project folder.
Returns:
Resulting merged directory representation
Note:
Use an empty string as content to ensure a file is created empty.
(``None`` contents will not be created).
"""
return _inplace_merge(deepcopy(old), new)
def _inplace_merge(old: Structure, new: Structure) -> Structure:
"""Similar to :obj:`~.merge` but modifies the first dict."""
for key, value in new.items():
old_value = old.get(key, None)
new_is_dict = isinstance(value, dict)
old_is_dict = isinstance(old_value, dict)
if new_is_dict and old_is_dict:
old[key] = _inplace_merge(
cast(Structure, old_value), cast(Structure, value)
)
elif old_value is not None and not new_is_dict and not old_is_dict:
# both are defined and final leaves
old[key] = _merge_leaf(cast(Leaf, old_value), cast(Leaf, value))
else:
old[key] = deepcopy(value)
return old
def _merge_leaf(old_value: Leaf, new_value: Leaf) -> Leaf:
"""Merge leaf values for the directory tree representation.
The leaf value is expected to be a tuple ``(content, update_rule)``.
When a string is passed, it is assumed to be the content and
``None`` is used for the update rule.
Args:
old_value: descriptor for the file that takes low precedence during the merge.
new_value: descriptor for the file that takes high precedence during the merge.
Note:
        ``None`` contents are ignored, use an empty string to force empty
contents.
Returns:
Resulting value for the merged leaf
"""
old = old_value if isinstance(old_value, (list, tuple)) else (old_value, None)
new = new_value if isinstance(new_value, (list, tuple)) else (new_value, None)
content = old[0] if new[0] is None else new[0]
file_op = old[1] if new[1] is None else new[1]
if file_op is None:
return content
return (content, file_op)
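# Editor-added sketch: composing the manipulation helpers on a tiny structure
# (the file names are illustrative).
def _example_structure_edits() -> Structure:
    struct: Structure = {"README.rst": "stub"}
    struct = ensure(struct, "docs/index.rst", "Welcome!", NO_OVERWRITE)
    struct = modify(struct, "README.rst", lambda old, op: ((old or "") + "\nMore.", op))
    struct = reject(struct, "docs/index.rst")
    return struct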
|
PypiClean
|
/dnanew-1.10-py3-none-any.whl/dna_audit_tool/aws_services/s3/s3_service.py
|
import json
import boto3
import botocore
import common.utils.helper as helper
from aws_services.s3 import s3_constants as constants
from common.constants import application_constants
from common.utils.initialize_logger import logger
class S3:
"""A class that checks the security settings of Amazon S3 buckets."""
def __init__(self):
"""
        Initializes an s3 client object with the specified maximum number of retries in the specified region.
"""
try:
configuration = helper.get_configuration()
self.s3_client = boto3.client('s3',config = configuration)
except Exception as ex:
logger.error("Error occurred while initializing s3 client objects: %s", str(ex))
raise ex
def list_resources(self):
"""
Returns a list of all S3 buckets associated with the S3 client.
Parameters:
- None
Returns:
- A list of S3 bucket names.
Raises:
- botocore.exceptions.ClientError: if there is an error communicating with AWS.
"""
try:
buckets = [bucket['Name'] for bucket in self.s3_client.list_buckets()['Buckets']]
return buckets
except botocore.exceptions.ClientError as ex:
logger.error("Error occurred when listing S3 resources: %s", str(ex))
raise ex
def check_encryption_at_rest(self, bucket_name):
"""
Check if encryption at rest is enabled for a given S3 bucket.
Args:
bucket_name (str): Name of the S3 bucket to check.
Returns:
            str: The check result, one of ``application_constants.ResultStatus`` (PASSED, FAILED or UNKNOWN).
Raises:
Exception: If any error occurs during the check.
"""
logger.info("Checking for encryption at rest for bucket %s", bucket_name)
try:
response = self.s3_client.get_bucket_encryption(Bucket=bucket_name)
if 'ServerSideEncryptionConfiguration' in response and 'Rules' in response['ServerSideEncryptionConfiguration'] and len(response['ServerSideEncryptionConfiguration']['Rules']) > 0:
check_result = application_constants.ResultStatus.PASSED
else:
check_result = application_constants.ResultStatus.FAILED
except self.s3_client.exceptions.ClientError as ex:
error_code = ex.response['Error']['Code']
if error_code == 'ServerSideEncryptionConfigurationNotFoundError':
check_result = application_constants.ResultStatus.FAILED
else:
logger.error("Error checking encryption at rest for bucket %s: %s", bucket_name, str(ex))
check_result = application_constants.ResultStatus.UNKNOWN
except Exception as ex:
logger.error("Error occurred during check for encryption at rest for bucket %s: %s", bucket_name, str(ex))
raise ex
logger.info("Completed checking for encryption at rest for bucket %s", bucket_name)
return check_result
def check_in_transit_encryption(self, bucket_name):
"""
Check if in-transit encryption is enabled for the specified S3 bucket.
Parameters:
bucket_name (str): The name of the S3 bucket to check.
Returns:
            str: The check result, one of ``application_constants.ResultStatus`` (PASSED, FAILED or UNKNOWN).
"""
logger.info("Checking for in-transit encryption using Secure Transport for bucket %s", bucket_name)
try:
response = self.s3_client.get_bucket_policy(Bucket=bucket_name)
policy = json.loads(response['Policy'])
statements = policy['Statement']
# Check if any statement allows access over insecure transport
for statement in statements:
if statement['Effect'] == 'Deny' and 'Condition' in statement and 'Bool' in statement['Condition'] and 'aws:SecureTransport' in statement['Condition']['Bool'] and statement['Condition']['Bool']['aws:SecureTransport'] == 'false':
check_result = application_constants.ResultStatus.PASSED
break
else:
check_result = application_constants.ResultStatus.FAILED
except self.s3_client.exceptions.ClientError as ex:
if ex.response['Error']['Code'] == 'NoSuchBucketPolicy':
check_result = application_constants.ResultStatus.FAILED
else:
logger.error("Error checking in-transit encryption for bucket %s: %s", bucket_name, str(ex))
check_result = application_constants.ResultStatus.UNKNOWN
logger.info("Completed checking for in-transit encryption using Secure Transport for bucket %s", bucket_name)
return check_result
def check_s3_acl(self, bucket_name):
"""
Checks the Access Control List (ACL) of an S3 bucket to determine if it is publicly accessible.
Args:
bucket_name (str): The name of the S3 bucket to check.
Returns:
            str: The check result, one of ``application_constants.ResultStatus`` (PASSED, FAILED or UNKNOWN).
"""
logger.info("Checking S3 ACL for validating it is publicly accessible for bucket %s", bucket_name)
try:
response = self.s3_client.get_bucket_acl(Bucket=bucket_name)
grants = response['Grants']
for grant in grants:
grantee = grant['Grantee']
permission = grant['Permission']
if 'URI' in grantee and grantee['URI'] in ('http://acs.amazonaws.com/groups/global/AllUsers', 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers') and permission in ('FULL_CONTROL', 'WRITE'):
check_result = application_constants.ResultStatus.FAILED
break
else:
check_result = application_constants.ResultStatus.PASSED
except self.s3_client.exceptions.NoSuchBucket:
logger.error("Bucket %s not found", bucket_name)
check_result = application_constants.ResultStatus.UNKNOWN
except self.s3_client.exceptions.AccessDenied:
logger.error("Access denied when checking ACL for bucket %s", bucket_name)
check_result = application_constants.ResultStatus.UNKNOWN
except self.s3_client.exceptions.ClientError as ex:
logger.error("Error checking ACL for bucket %s: %s", bucket_name, str(ex))
check_result = application_constants.ResultStatus.UNKNOWN
else:
logger.info("Successfully checked ACL for bucket %s", bucket_name)
logger.info("Completed checking S3 ACL for validating it is publicly accessible for bucket %s", bucket_name)
return check_result
def check_s3_policy(self, bucket_name):
"""
Checks the bucket policy of an S3 bucket to determine if public write access or non-secure access is allowed.
Args:
bucket_name (str): The name of the S3 bucket to check.
Returns:
            str: The check result, one of ``application_constants.ResultStatus`` (PASSED, FAILED or UNKNOWN).
"""
logger.info("Checking S3 Policy for public write access or non-secure access for bucket %s", bucket_name)
try:
response = self.s3_client.get_bucket_policy(Bucket=bucket_name)
policy = json.loads(response['Policy'])
public_write_access_enabled = False
for statement in policy['Statement']:
if 'Effect' in statement and statement['Effect'] == 'Allow':
if 'Principal' in statement and statement['Principal'] == '*':
if 'Action' in statement and ('s3:PutObject' in statement['Action'] or 's3:PutObjectAcl' in statement['Action']):
public_write_access_enabled = True
break
check_result = application_constants.ResultStatus.FAILED if public_write_access_enabled else application_constants.ResultStatus.PASSED
except self.s3_client.exceptions.ClientError as ex:
error_code = ex.response['Error']['Code']
if error_code == 'NoSuchBucketPolicy':
check_result = application_constants.ResultStatus.FAILED
else:
logger.error("Error checking S3 policy for bucket %s: %s", bucket_name, str(ex))
check_result = application_constants.ResultStatus.UNKNOWN
logger.info("Completed checking S3 Policy for public write access or non-secure access for bucket %s", bucket_name)
return check_result
def check_s3_versioning(self, bucket_name):
"""
Checks the versioning status of an S3 bucket.
Args:
bucket_name (str): The name of the S3 bucket to check.
Returns:
            str: The check result, one of ``application_constants.ResultStatus`` (PASSED, FAILED or UNKNOWN).
"""
logger.info("Checking if S3 versioning is enabled or not for bucket %s", bucket_name)
try:
response = self.s3_client.get_bucket_versioning(Bucket=bucket_name)
check_result = application_constants.ResultStatus.PASSED if response.get('Status') == "Enabled" else application_constants.ResultStatus.FAILED
logger.info("Successfully checked S3 versioning for bucket %s", bucket_name)
except self.s3_client.exceptions.ClientError as ex:
logger.error("Error checking S3 versioning for bucket %s: %s", bucket_name, str(ex))
check_result = application_constants.ResultStatus.UNKNOWN
logger.info("Completed if S3 versioning is enabled or not for bucket %s", bucket_name)
return check_result
def check_bucket_tags(self, bucket_name, required_tags=None):
"""
Checks if the specified S3 bucket has the required tags.
Args:
bucket_name (str): The name of the S3 bucket.
Returns:
            str: The check result, one of ``application_constants.ResultStatus`` (PASSED, FAILED or UNKNOWN).
"""
try:
if required_tags is None:
required_tags = application_constants.Generic.REQUIRED_TAGS
if not required_tags:
check_result = application_constants.ResultStatus.PASSED
else:
tags = self.s3_client.get_bucket_tagging(Bucket=bucket_name)['TagSet']
missing_tags = [tag for tag in required_tags if tag.split(":")[0] not in [t["Key"] for t in tags]]
check_result = application_constants.ResultStatus.PASSED if not any(missing_tags) else application_constants.ResultStatus.FAILED
logger.info(f"Completed checking the tags for S3 bucket {bucket_name}")
return check_result
except botocore.exceptions.ClientError as ex:
if ex.response['Error']['Code'] == 'NoSuchTagSet':
check_result = application_constants.ResultStatus.FAILED
else:
logger.error(f"An error occurred while checking the tags for S3 bucket {bucket_name}: {str(ex)}")
check_result = application_constants.ResultStatus.UNKNOWN
logger.info(f"Completed checking the tags for S3 bucket {bucket_name}")
return check_result
except Exception as ex:
logger.exception(f"An error occurred while checking the tags for S3 bucket {bucket_name}: {str(ex)}")
check_result = application_constants.ResultStatus.UNKNOWN
logger.info(f"Completed checking the tags for S3 bucket {bucket_name}")
return check_result
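# Editor-added sketch: running a couple of the checks across every bucket in the account.
# It assumes valid AWS credentials and the package's helper configuration are available.
if __name__ == "__main__":
    s3_checker = S3()
    for bucket in s3_checker.list_resources():
        print(
            bucket,
            s3_checker.check_encryption_at_rest(bucket),
            s3_checker.check_s3_versioning(bucket),
        )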
|
PypiClean
|
/Products.Ploneboard-3.6.zip/Products.Ploneboard-3.6/src/Products/Ploneboard/interfaces.py
|
from zope.interface import Interface, Attribute
class IPloneboard(Interface):
"""
    Ploneboard is the outermost board object, the one that shows up in your site.
The board contains forums. Board is folderish. The number of items contained
in Board should be limited and steady.
This is an optional type.
"""
def addForum(id, title, description):
"""
The method add_forum takes id, title and description and creates a
forum inside the board.
Should this go away and rather just use the regular Plone content
creation? That would make it easier to switch content types.
"""
def removeForum(forum_id):
"""
The method remove_forum removes the forum with the specified id from
this board.
"""
def getForum(forum_id):
"""
Return the forum for forum_id, or None.
"""
def getForumIds():
"""
Returns the ids of the forums.
If this is the only board in a site, it should return forum ids for
the entire site, not just inside the board.
"""
def getForums():
"""
Return the forums
If this is the only board in a site, it should return forums for the
entire site, not just inside the board.
"""
def searchComments(query):
"""
This method searches through all forums, conversations and comments.
"""
class IForum(Interface):
"""
A Forum contains conversations. Forum is folderish. The number of items contained
in Forum is high and increases, so it is probably a good idea to use BTrees
for indexing.
"""
def getBoard():
"""
Gets the containing board.
Returns None if there are no boards in the site.
"""
def addConversation(subject, body, **kw):
"""
Adds a new conversation to the forum.
Should this go away and rather just use the regular Plone content
creation? That would make it easier to switch content types.
"""
def getConversation(conversation_id):
"""
Returns the conversation with the given conversation id.
"""
def removeConversation(conversation_id):
"""
Removes a conversation with the given conversation id from the forum.
"""
def getConversations(limit=20, offset=0):
"""
Returns a maximum of 'limit' conversations, the last updated conversations first,
starting from 'offset'.
"""
def getNumberOfConversations():
"""
Returns the number of conversations in this forum.
"""
def getNumberOfComments():
"""
Returns the number of comments to this forum.
"""
class IConversation(Interface):
"""
Conversation contains comments. The number of comments contained in
Conversation is high and increases. It is recommended to use BTree for
indexing and to autogenerate ids for contained comments.
"""
def getForum():
"""
Returns the containing forum.
"""
def addComment(comment_subject, comment_body):
"""
Adds a new comment with subject and body.
"""
def getComment(comment_id):
"""
Returns the comment with the specified id.
"""
def getComments(limit=30, offset=0, **kw):
"""
Retrieves the specified number of comments with offset 'offset'.
In addition there are kw args for sorting and retrieval options.
"""
def getNumberOfComments():
"""
Returns the number of comments to this conversation.
"""
def getLastCommentDate():
"""
Returns a DateTime corresponding to the timestamp of the last comment
for the conversation.
"""
def getLastCommentAuthor():
"""
Returns the author of the last comment for the conversation.
"""
def getLastComment():
"""
Returns the last comment as full object (no Brain).
If there is no such one then None is returned
"""
def getRootComments():
"""
        Return a list of all comments rooted to the board; i.e. comments which
are not replies to other comments.
"""
def getFirstComment():
"""
Returns the first (aka root) comment in this IConversation.
"""
class IComment(Interface):
"""
A comment contains regular text body and metadata.
"""
def getConversation():
"""
Returns the containing conversation.
"""
def addReply(comment_subject, comment_body):
"""
        Add a response to this comment, of the same type as the object itself.
"""
def inReplyTo():
"""
Returns the comment object this comment is a reply to. If it is the
topmost comment (ie: first comment in a conversation), it returns None.
"""
def getReplies():
"""
Returns the comments that were replies to this one.
"""
def getTitle():
"""
Returns the title of the comment.
"""
def getText():
"""
Returns the text of the comment.
"""
def delete():
"""
Delete this comment. Will ensure to clean up any comments
that were replies to this comment.
"""
class IAttachmentSupport(Interface):
"""
Attachment support, typically for comments
"""
def addAttachment(file, title=None):
"""
Add a file attachment.
"""
def hasAttachment():
"""
        Return 1 if this comment has attachments, 0 otherwise.
"""
def getNumberOfAllowedAttachments():
"""
Return the number of allowed attachments
"""
def getNumberOfAttachments():
"""
Return the number of attachments
"""
def getAttachments():
"""
Return all attachments
"""
class IPloneboardTool(Interface):
"""Services for Ploneboard: Handles text transformation plugins and attached files.
"""
id = Attribute('id', 'Must be set to "portal_ploneboard"')
def registerTransform(name, module, friendlyName=None):
"""Adds a text transformation module to portal_transforms.
Used from the configuration panel
"""
def unregisterTransform(name):
"""Removes the transformation module from portal_transforms
Used from the configuration panel
"""
def enableTransform(name, enabled=True):
"""Globally enables a transform (site wide)
"""
def unregisterAllTransforms():
"""Removes from portal_transforms all transform modules added with Ploneboard
"""
def getTransforms():
"""Returns list of transform names.
"""
def getTransformFriendlyName(name):
"""Returns a friendly name for the given transform.
"""
def getEnabledTransforms():
"""Returns list of names for enabled transforms.
"""
def performCommentTransform(orig, **kwargs):
"""This performs the comment transform - also used for preview.
"""
def getUploadedFiles():
"""Stores files from request in session and returns these files
"""
def clearUploadedFiles():
"""Removes uploaded files from session machinery
"""
|
PypiClean
|
/formification-1.2.0-py3-none-any.whl/formulaic/static/admin/formulaic/ember-formulaic/node_modules/ws/lib/Receiver.js
|
var util = require('util')
, Validation = require('./Validation').Validation
, ErrorCodes = require('./ErrorCodes')
, BufferPool = require('./BufferPool')
, bufferUtil = require('./BufferUtil').BufferUtil
, PerMessageDeflate = require('./PerMessageDeflate');
/**
* HyBi Receiver implementation
*/
function Receiver(extensions, maxPayload) {
if (this instanceof Receiver === false) {
throw new TypeError("Classes can't be function-called");
}
if (typeof extensions === 'number') {
maxPayload = extensions;
extensions = {};
}
// memory pool for fragmented messages
var fragmentedPoolPrevUsed = -1;
this.fragmentedBufferPool = new BufferPool(1024, function(db, length) {
return db.used + length;
}, function(db) {
return fragmentedPoolPrevUsed = fragmentedPoolPrevUsed >= 0 ?
Math.ceil((fragmentedPoolPrevUsed + db.used) / 2) :
db.used;
});
// memory pool for unfragmented messages
var unfragmentedPoolPrevUsed = -1;
this.unfragmentedBufferPool = new BufferPool(1024, function(db, length) {
return db.used + length;
}, function(db) {
return unfragmentedPoolPrevUsed = unfragmentedPoolPrevUsed >= 0 ?
Math.ceil((unfragmentedPoolPrevUsed + db.used) / 2) :
db.used;
});
this.extensions = extensions || {};
this.maxPayload = maxPayload || 0;
this.currentPayloadLength = 0;
this.state = {
activeFragmentedOperation: null,
lastFragment: false,
masked: false,
opcode: 0,
fragmentedOperation: false
};
this.overflow = [];
this.headerBuffer = new Buffer(10);
this.expectOffset = 0;
this.expectBuffer = null;
this.expectHandler = null;
this.currentMessage = [];
this.currentMessageLength = 0;
this.messageHandlers = [];
this.expectHeader(2, this.processPacket);
this.dead = false;
this.processing = false;
this.onerror = function() {};
this.ontext = function() {};
this.onbinary = function() {};
this.onclose = function() {};
this.onping = function() {};
this.onpong = function() {};
}
module.exports = Receiver;
/**
* Add new data to the parser.
*
* @api public
*/
Receiver.prototype.add = function(data) {
if (this.dead) return;
var dataLength = data.length;
if (dataLength == 0) return;
if (this.expectBuffer == null) {
this.overflow.push(data);
return;
}
var toRead = Math.min(dataLength, this.expectBuffer.length - this.expectOffset);
fastCopy(toRead, data, this.expectBuffer, this.expectOffset);
this.expectOffset += toRead;
if (toRead < dataLength) {
this.overflow.push(data.slice(toRead));
}
while (this.expectBuffer && this.expectOffset == this.expectBuffer.length) {
var bufferForHandler = this.expectBuffer;
this.expectBuffer = null;
this.expectOffset = 0;
this.expectHandler.call(this, bufferForHandler);
}
};
/**
* Releases all resources used by the receiver.
*
* @api public
*/
Receiver.prototype.cleanup = function() {
this.dead = true;
this.overflow = null;
this.headerBuffer = null;
this.expectBuffer = null;
this.expectHandler = null;
this.unfragmentedBufferPool = null;
this.fragmentedBufferPool = null;
this.state = null;
this.currentMessage = null;
this.onerror = null;
this.ontext = null;
this.onbinary = null;
this.onclose = null;
this.onping = null;
this.onpong = null;
};
/**
* Waits for a certain number of header bytes to be available, then fires a callback.
*
* @api private
*/
Receiver.prototype.expectHeader = function(length, handler) {
if (length == 0) {
handler(null);
return;
}
this.expectBuffer = this.headerBuffer.slice(this.expectOffset, this.expectOffset + length);
this.expectHandler = handler;
var toRead = length;
while (toRead > 0 && this.overflow.length > 0) {
var fromOverflow = this.overflow.pop();
if (toRead < fromOverflow.length) this.overflow.push(fromOverflow.slice(toRead));
var read = Math.min(fromOverflow.length, toRead);
fastCopy(read, fromOverflow, this.expectBuffer, this.expectOffset);
this.expectOffset += read;
toRead -= read;
}
};
/**
* Waits for a certain number of data bytes to be available, then fires a callback.
*
* @api private
*/
Receiver.prototype.expectData = function(length, handler) {
if (length == 0) {
handler(null);
return;
}
this.expectBuffer = this.allocateFromPool(length, this.state.fragmentedOperation);
this.expectHandler = handler;
var toRead = length;
while (toRead > 0 && this.overflow.length > 0) {
var fromOverflow = this.overflow.pop();
if (toRead < fromOverflow.length) this.overflow.push(fromOverflow.slice(toRead));
var read = Math.min(fromOverflow.length, toRead);
fastCopy(read, fromOverflow, this.expectBuffer, this.expectOffset);
this.expectOffset += read;
toRead -= read;
}
};
/**
* Allocates memory from the buffer pool.
*
* @api private
*/
Receiver.prototype.allocateFromPool = function(length, isFragmented) {
return (isFragmented ? this.fragmentedBufferPool : this.unfragmentedBufferPool).get(length);
};
/**
* Start processing a new packet.
*
* @api private
*/
Receiver.prototype.processPacket = function (data) {
if (this.extensions[PerMessageDeflate.extensionName]) {
if ((data[0] & 0x30) != 0) {
this.error('reserved fields (2, 3) must be empty', 1002);
return;
}
} else {
if ((data[0] & 0x70) != 0) {
this.error('reserved fields must be empty', 1002);
return;
}
}
this.state.lastFragment = (data[0] & 0x80) == 0x80;
this.state.masked = (data[1] & 0x80) == 0x80;
var compressed = (data[0] & 0x40) == 0x40;
var opcode = data[0] & 0xf;
if (opcode === 0) {
if (compressed) {
this.error('continuation frame cannot have the Per-message Compressed bits', 1002);
return;
}
// continuation frame
this.state.fragmentedOperation = true;
this.state.opcode = this.state.activeFragmentedOperation;
if (!(this.state.opcode == 1 || this.state.opcode == 2)) {
this.error('continuation frame cannot follow current opcode', 1002);
return;
}
}
else {
if (opcode < 3 && this.state.activeFragmentedOperation != null) {
this.error('data frames after the initial data frame must have opcode 0', 1002);
return;
}
if (opcode >= 8 && compressed) {
this.error('control frames cannot have the Per-message Compressed bits', 1002);
return;
}
this.state.compressed = compressed;
this.state.opcode = opcode;
if (this.state.lastFragment === false) {
this.state.fragmentedOperation = true;
this.state.activeFragmentedOperation = opcode;
}
else this.state.fragmentedOperation = false;
}
var handler = opcodes[this.state.opcode];
if (typeof handler == 'undefined') this.error('no handler for opcode ' + this.state.opcode, 1002);
else {
handler.start.call(this, data);
}
};
/**
* Ends processing of a packet.
*
* @api private
*/
Receiver.prototype.endPacket = function() {
if (this.dead) return;
if (!this.state.fragmentedOperation) this.unfragmentedBufferPool.reset(true);
else if (this.state.lastFragment) this.fragmentedBufferPool.reset(true);
this.expectOffset = 0;
this.expectBuffer = null;
this.expectHandler = null;
if (this.state.lastFragment && this.state.opcode === this.state.activeFragmentedOperation) {
// end current fragmented operation
this.state.activeFragmentedOperation = null;
}
this.currentPayloadLength = 0;
this.state.lastFragment = false;
this.state.opcode = this.state.activeFragmentedOperation != null ? this.state.activeFragmentedOperation : 0;
this.state.masked = false;
this.expectHeader(2, this.processPacket);
};
/**
* Reset the parser state.
*
* @api private
*/
Receiver.prototype.reset = function() {
if (this.dead) return;
this.state = {
activeFragmentedOperation: null,
lastFragment: false,
masked: false,
opcode: 0,
fragmentedOperation: false
};
this.fragmentedBufferPool.reset(true);
this.unfragmentedBufferPool.reset(true);
this.expectOffset = 0;
this.expectBuffer = null;
this.expectHandler = null;
this.overflow = [];
this.currentMessage = [];
this.currentMessageLength = 0;
this.messageHandlers = [];
this.currentPayloadLength = 0;
};
/**
* Unmask received data.
*
* @api private
*/
Receiver.prototype.unmask = function (mask, buf, binary) {
if (mask != null && buf != null) bufferUtil.unmask(buf, mask);
if (binary) return buf;
return buf != null ? buf.toString('utf8') : '';
};
/**
* Handles an error
*
* @api private
*/
Receiver.prototype.error = function (reason, protocolErrorCode) {
if (this.dead) return;
this.reset();
if (typeof reason == 'string') {
this.onerror(new Error(reason), protocolErrorCode);
}
else if (reason.constructor == Error) {
this.onerror(reason, protocolErrorCode);
}
else {
this.onerror(new Error("An error occurred"), protocolErrorCode);
}
return this;
};
/**
* Executes buffered message handlers.
*
* @api private
*/
Receiver.prototype.flush = function() {
if (this.processing || this.dead) return;
var handler = this.messageHandlers.shift();
if (!handler) return;
this.processing = true;
var self = this;
handler(function() {
self.processing = false;
self.flush();
});
};
/**
* Apply extensions to message
*
* @api private
*/
Receiver.prototype.applyExtensions = function(messageBuffer, fin, compressed, callback) {
var self = this;
if (compressed) {
this.extensions[PerMessageDeflate.extensionName].decompress(messageBuffer, fin, function(err, buffer) {
if (self.dead) return;
if (err) {
callback(new Error('invalid compressed data'));
return;
}
callback(null, buffer);
});
} else {
callback(null, messageBuffer);
}
};
/**
* Checks payload size, disconnects socket when it exceeds maxPayload
*
* @api private
*/
Receiver.prototype.maxPayloadExceeded = function(length) {
if (this.maxPayload === undefined || this.maxPayload === null || this.maxPayload < 1) {
return false;
}
var fullLength = this.currentPayloadLength + length;
if (fullLength < this.maxPayload) {
this.currentPayloadLength = fullLength;
return false;
}
this.error('payload cannot exceed ' + this.maxPayload + ' bytes', 1009);
this.messageBuffer = [];
this.cleanup();
return true;
};
/**
* Buffer utilities
*/
function readUInt16BE(start) {
return (this[start]<<8) +
this[start+1];
}
function readUInt32BE(start) {
return (this[start]<<24) +
(this[start+1]<<16) +
(this[start+2]<<8) +
this[start+3];
}
function fastCopy(length, srcBuffer, dstBuffer, dstOffset) {
switch (length) {
default: srcBuffer.copy(dstBuffer, dstOffset, 0, length); break;
case 16: dstBuffer[dstOffset+15] = srcBuffer[15];
case 15: dstBuffer[dstOffset+14] = srcBuffer[14];
case 14: dstBuffer[dstOffset+13] = srcBuffer[13];
case 13: dstBuffer[dstOffset+12] = srcBuffer[12];
case 12: dstBuffer[dstOffset+11] = srcBuffer[11];
case 11: dstBuffer[dstOffset+10] = srcBuffer[10];
case 10: dstBuffer[dstOffset+9] = srcBuffer[9];
case 9: dstBuffer[dstOffset+8] = srcBuffer[8];
case 8: dstBuffer[dstOffset+7] = srcBuffer[7];
case 7: dstBuffer[dstOffset+6] = srcBuffer[6];
case 6: dstBuffer[dstOffset+5] = srcBuffer[5];
case 5: dstBuffer[dstOffset+4] = srcBuffer[4];
case 4: dstBuffer[dstOffset+3] = srcBuffer[3];
case 3: dstBuffer[dstOffset+2] = srcBuffer[2];
case 2: dstBuffer[dstOffset+1] = srcBuffer[1];
case 1: dstBuffer[dstOffset] = srcBuffer[0];
}
}
function clone(obj) {
var cloned = {};
for (var k in obj) {
if (obj.hasOwnProperty(k)) {
cloned[k] = obj[k];
}
}
return cloned;
}
/**
* Opcode handlers
*/
var opcodes = {
// text
'1': {
start: function(data) {
var self = this;
// decode length
var firstLength = data[1] & 0x7f;
if (firstLength < 126) {
if (self.maxPayloadExceeded(firstLength)){
self.error('Maximum payload exceeded in compressed text message. Aborting...', 1009);
return;
}
opcodes['1'].getData.call(self, firstLength);
}
else if (firstLength == 126) {
self.expectHeader(2, function(data) {
var length = readUInt16BE.call(data, 0);
if (self.maxPayloadExceeded(length)){
self.error('Maximum payload exceeded in compressed text message. Aborting...', 1009);
return;
}
opcodes['1'].getData.call(self, length);
});
}
else if (firstLength == 127) {
self.expectHeader(8, function(data) {
if (readUInt32BE.call(data, 0) != 0) {
self.error('packets with a length spanning more than 32 bits are currently not supported', 1008);
return;
}
var length = readUInt32BE.call(data, 4);
if (self.maxPayloadExceeded(length)){
self.error('Maximum payload exceeded in compressed text message. Aborting...', 1009);
return;
}
opcodes['1'].getData.call(self, length);
});
}
},
getData: function(length) {
var self = this;
if (self.state.masked) {
self.expectHeader(4, function(data) {
var mask = data;
self.expectData(length, function(data) {
opcodes['1'].finish.call(self, mask, data);
});
});
}
else {
self.expectData(length, function(data) {
opcodes['1'].finish.call(self, null, data);
});
}
},
finish: function(mask, data) {
var self = this;
var packet = this.unmask(mask, data, true) || new Buffer(0);
var state = clone(this.state);
this.messageHandlers.push(function(callback) {
self.applyExtensions(packet, state.lastFragment, state.compressed, function(err, buffer) {
if (err) {
if(err.type===1009){
return self.error('Maximum payload exceeded in compressed text message. Aborting...', 1009);
}
return self.error(err.message, 1007);
}
if (buffer != null) {
if( self.maxPayload==0 || (self.maxPayload > 0 && (self.currentMessageLength + buffer.length) < self.maxPayload) ){
self.currentMessage.push(buffer);
}
else{
self.currentMessage=null;
self.currentMessage = [];
self.currentMessageLength = 0;
self.error(new Error('Maximum payload exceeded. maxPayload: '+self.maxPayload), 1009);
return;
}
self.currentMessageLength += buffer.length;
}
if (state.lastFragment) {
var messageBuffer = Buffer.concat(self.currentMessage);
self.currentMessage = [];
self.currentMessageLength = 0;
if (!Validation.isValidUTF8(messageBuffer)) {
self.error('invalid utf8 sequence', 1007);
return;
}
self.ontext(messageBuffer.toString('utf8'), {masked: state.masked, buffer: messageBuffer});
}
callback();
});
});
this.flush();
this.endPacket();
}
},
// binary
'2': {
start: function(data) {
var self = this;
// decode length
var firstLength = data[1] & 0x7f;
if (firstLength < 126) {
if (self.maxPayloadExceeded(firstLength)){
self.error('Max payload exceeded in compressed binary message. Aborting...', 1009);
return;
}
opcodes['2'].getData.call(self, firstLength);
}
else if (firstLength == 126) {
self.expectHeader(2, function(data) {
var length = readUInt16BE.call(data, 0);
if (self.maxPayloadExceeded(length)){
self.error('Max payload exceeded in compressed binary message. Aborting...', 1009);
return;
}
opcodes['2'].getData.call(self, length);
});
}
else if (firstLength == 127) {
self.expectHeader(8, function(data) {
if (readUInt32BE.call(data, 0) != 0) {
self.error('packets with a length spanning more than 32 bits are currently not supported', 1008);
return;
}
var length = readUInt32BE.call(data, 4);
if (self.maxPayloadExceeded(length)){
self.error('Max payload exceeded in compressed binary message. Aborting...', 1009);
return;
}
opcodes['2'].getData.call(self, length);
});
}
},
getData: function(length) {
var self = this;
if (self.state.masked) {
self.expectHeader(4, function(data) {
var mask = data;
self.expectData(length, function(data) {
opcodes['2'].finish.call(self, mask, data);
});
});
}
else {
self.expectData(length, function(data) {
opcodes['2'].finish.call(self, null, data);
});
}
},
finish: function(mask, data) {
var self = this;
var packet = this.unmask(mask, data, true) || new Buffer(0);
var state = clone(this.state);
this.messageHandlers.push(function(callback) {
self.applyExtensions(packet, state.lastFragment, state.compressed, function(err, buffer) {
if (err) {
if(err.type===1009){
return self.error('Max payload exceeded in compressed binary message. Aborting...', 1009);
}
return self.error(err.message, 1007);
}
if (buffer != null) {
if( self.maxPayload==0 || (self.maxPayload > 0 && (self.currentMessageLength + buffer.length) < self.maxPayload) ){
self.currentMessage.push(buffer);
}
else{
self.currentMessage=null;
self.currentMessage = [];
self.currentMessageLength = 0;
self.error(new Error('Maximum payload exceeded'), 1009);
return;
}
self.currentMessageLength += buffer.length;
}
if (state.lastFragment) {
var messageBuffer = Buffer.concat(self.currentMessage);
self.currentMessage = [];
self.currentMessageLength = 0;
self.onbinary(messageBuffer, {masked: state.masked, buffer: messageBuffer});
}
callback();
});
});
this.flush();
this.endPacket();
}
},
// close
'8': {
start: function(data) {
var self = this;
if (self.state.lastFragment == false) {
self.error('fragmented close is not supported', 1002);
return;
}
// decode length
var firstLength = data[1] & 0x7f;
if (firstLength < 126) {
opcodes['8'].getData.call(self, firstLength);
}
else {
self.error('control frames cannot have more than 125 bytes of data', 1002);
}
},
getData: function(length) {
var self = this;
if (self.state.masked) {
self.expectHeader(4, function(data) {
var mask = data;
self.expectData(length, function(data) {
opcodes['8'].finish.call(self, mask, data);
});
});
}
else {
self.expectData(length, function(data) {
opcodes['8'].finish.call(self, null, data);
});
}
},
finish: function(mask, data) {
var self = this;
data = self.unmask(mask, data, true);
var state = clone(this.state);
this.messageHandlers.push(function() {
if (data && data.length == 1) {
self.error('close packets with data must be at least two bytes long', 1002);
return;
}
var code = data && data.length > 1 ? readUInt16BE.call(data, 0) : 1000;
if (!ErrorCodes.isValidErrorCode(code)) {
self.error('invalid error code', 1002);
return;
}
var message = '';
if (data && data.length > 2) {
var messageBuffer = data.slice(2);
if (!Validation.isValidUTF8(messageBuffer)) {
self.error('invalid utf8 sequence', 1007);
return;
}
message = messageBuffer.toString('utf8');
}
self.onclose(code, message, {masked: state.masked});
self.reset();
});
this.flush();
},
},
// ping
'9': {
start: function(data) {
var self = this;
if (self.state.lastFragment == false) {
self.error('fragmented ping is not supported', 1002);
return;
}
// decode length
var firstLength = data[1] & 0x7f;
if (firstLength < 126) {
opcodes['9'].getData.call(self, firstLength);
}
else {
self.error('control frames cannot have more than 125 bytes of data', 1002);
}
},
getData: function(length) {
var self = this;
if (self.state.masked) {
self.expectHeader(4, function(data) {
var mask = data;
self.expectData(length, function(data) {
opcodes['9'].finish.call(self, mask, data);
});
});
}
else {
self.expectData(length, function(data) {
opcodes['9'].finish.call(self, null, data);
});
}
},
finish: function(mask, data) {
var self = this;
data = this.unmask(mask, data, true);
var state = clone(this.state);
this.messageHandlers.push(function(callback) {
self.onping(data, {masked: state.masked, binary: true});
callback();
});
this.flush();
this.endPacket();
}
},
// pong
'10': {
start: function(data) {
var self = this;
if (self.state.lastFragment == false) {
self.error('fragmented pong is not supported', 1002);
return;
}
// decode length
var firstLength = data[1] & 0x7f;
if (firstLength < 126) {
opcodes['10'].getData.call(self, firstLength);
}
else {
self.error('control frames cannot have more than 125 bytes of data', 1002);
}
},
getData: function(length) {
var self = this;
if (this.state.masked) {
this.expectHeader(4, function(data) {
var mask = data;
self.expectData(length, function(data) {
opcodes['10'].finish.call(self, mask, data);
});
});
}
else {
this.expectData(length, function(data) {
opcodes['10'].finish.call(self, null, data);
});
}
},
finish: function(mask, data) {
var self = this;
data = self.unmask(mask, data, true);
var state = clone(this.state);
this.messageHandlers.push(function(callback) {
self.onpong(data, {masked: state.masked, binary: true});
callback();
});
this.flush();
this.endPacket();
}
}
}
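/**
 * Illustrative sketch only (not part of this module): how a caller such as
 * ws's WebSocket class might wire up a Receiver. The `socket` stream and the
 * maxPayload value used here are assumptions made for the example.
 *
 *   var receiver = new Receiver({}, 100 * 1024 * 1024);
 *   receiver.ontext = function(text, flags) { console.log('text frame:', text); };
 *   receiver.onbinary = function(data, flags) { console.log('binary frame:', data.length, 'bytes'); };
 *   receiver.onclose = function(code, message) { console.log('close frame:', code, message); };
 *   receiver.onerror = function(error, protocolErrorCode) { console.error(error, protocolErrorCode); };
 *   // Raw frame bytes read from the transport are fed into the parser:
 *   socket.on('data', function(chunk) { receiver.add(chunk); });
 *   // When the connection goes away, release the buffer pools:
 *   socket.on('close', function() { receiver.cleanup(); });
 */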
|
PypiClean
|
/resoto_plugin_vsphere-3.6.5-py3-none-any.whl/resoto_plugin_vsphere/__init__.py
|
from datetime import datetime
from resotolib.logger import log
from resotolib.config import Config
from resotolib.baseplugin import BaseCollectorPlugin
from resotolib.baseresources import InstanceStatus
from .vsphere_client import get_vsphere_client
from .resources import (
VSphereCluster,
VSphereInstance,
VSphereDataCenter,
VSphereTemplate,
VSphereHost,
VSphereESXiHost,
VSphereDataStore,
VSphereDataStoreCluster,
VSphereResourcePool,
)
from .config import VSphereConfig
from typing import Dict
from pyVmomi import vim
class VSphereCollectorPlugin(BaseCollectorPlugin):
cloud = "vsphere"
instances_dict = {}
def get_host(self) -> VSphereHost:
return VSphereHost(id=Config.vsphere.host)
def get_keymap_from_vmlist(self, list_vm) -> Dict:
"""
Resolve custom attribute key IDs into a dict mapping key ID to key name.
"""
keyMap = {}
for key in list_vm[0].availableField:
keyMap[key.key] = key.name
return keyMap
def get_custom_attributes(self, vm, keymap):
"""
Use the custom attribute keymap to resolve key IDs and build a dict of
attribute name to custom value for the given VM.
"""
attr = {}
for value in vm.value:
attr[str(keymap[value.key])] = str(value.value)
return attr
def resource_pool(self, resourcepool, predecessor):
rpObj = VSphereResourcePool(id=resourcepool._moId, name=resourcepool.name)
self.graph.add_resource(predecessor, rpObj)
log.debug(f"Found ResourcePool - {resourcepool._moId} - {resourcepool.name}")
for vm in resourcepool.vm:
self.graph.add_edge(rpObj, self.instances_dict[vm._moId])
for successorPool in resourcepool.resourcePool:
log.debug(f"Found nested ResourcePool - {successorPool._moId} - {successorPool.name}")
self.resource_pool(successorPool, rpObj)
def get_instances(self) -> None:
"""
Loop over all VMs and add them to the graph as VSphereInstance (or VSphereTemplate) nodes.
"""
content = get_vsphere_client().client.RetrieveContent()
container = content.rootFolder # starting point to look into
view_type = [vim.VirtualMachine] # object types to look for
recursive = True # whether we should look into it recursively
container_view = content.viewManager.CreateContainerView(container, view_type, recursive)
vms = container_view.view
keys = self.get_keymap_from_vmlist(vms)
instance_status_map: Dict[str, InstanceStatus] = {
"pending": InstanceStatus.BUSY,
"running": InstanceStatus.RUNNING,
"shutting-down": InstanceStatus.BUSY,
"terminated": InstanceStatus.TERMINATED,
"stopping": InstanceStatus.BUSY,
"notRunning": InstanceStatus.STOPPED,
}
# loop over the list of VMs
for list_vm in vms:
try:
tags = self.get_custom_attributes(list_vm, keys)
try:
ctime = datetime.fromtimestamp(list_vm.config.createDate.timestamp())
except AttributeError:
ctime = None
if list_vm.config.template:
vm = VSphereTemplate(id=list_vm._moId, name=str(list_vm.name), ctime=ctime)
else:
vm = VSphereInstance(
id=list_vm._moId,
name=str(list_vm.name),
instance_cores=int(list_vm.config.hardware.numCPU),
instance_memory=int(list_vm.config.hardware.memoryMB / 1024),
tags=tags,
ctime=ctime,
instance_status=instance_status_map.get(list_vm.guest.guestState, InstanceStatus.UNKNOWN),
)
except Exception:
log.exception(f"Error while collecting {list_vm}")
else:
log.debug(f"found {vm.id} - {vm}")
self.instances_dict[vm.id] = vm
self.graph.add_node(vm)
def collect(self) -> None:
log.debug("plugin: collecting vsphere resources")
if not Config.vsphere.host:
log.debug("no VSphere host given - skipping collection")
return
host = self.get_host()
self.get_instances()
log.debug(f"found {len(self.instances_dict)} instances and templates")
self.graph.add_resource(self.graph.root, host)
content = get_vsphere_client().client.RetrieveContent()
datacenters = [entity for entity in content.rootFolder.childEntity if hasattr(entity, "vmFolder")]
# datacenters are root folder objects
for dc in datacenters:
log.debug(f"Found datacenter - {dc._moId} - {dc.name}")
dcObj = VSphereDataCenter(id=dc._moId, name=dc.name)
self.graph.add_resource(host, dcObj)
# get clusters in datacenter
for cluster in dc.hostFolder.childEntity:
log.debug(f"Found cluster - {cluster._moId} - {cluster.name}")
clusterObj = VSphereCluster(id=cluster._moId, name=cluster.name)
self.graph.add_resource(dcObj, clusterObj)
try:
rpool = cluster.resourcePool
self.resource_pool(rpool, clusterObj)
except Exception:
log.warning(f"Resourcepool error for cluster {cluster._moId} {cluster.name}")
for datastore in dc.datastoreFolder.childEntity:
if datastore._wsdlName == "Datastore":
log.debug(f"Found Datastore - {datastore._moId} - {datastore.name}")
dsObj = VSphereDataStore(id=datastore._moId, name=datastore.name)
self.graph.add_resource(dcObj, dsObj)
for vm in datastore.vm:
vmObj = self.instances_dict[vm._moId]
self.graph.add_edge(dsObj, vmObj)
elif datastore._wsdlName == "StoragePod":
log.debug(f"Found DatastoreCluster - {datastore._moId} - {datastore.name}")
dsc = VSphereDataStoreCluster(id=datastore._moId, name=datastore.name)
self.graph.add_resource(dcObj, dsc)
for store in datastore.childEntity:
log.debug(f"Found DatastoreCluster Datastore - {store._moId} - {store.name}")
if store._wsdlName == "Datastore":
dsObj = VSphereDataStore(id=store._moId, name=store.name)
self.graph.add_resource(dcObj, dsObj)
for vm in store.vm:
vmObj = self.instances_dict[vm._moId]
self.graph.add_edge(dsObj, vmObj)
# get hosts from a cluster
for host in cluster.host:
log.debug(f"Found host - {host._moId} - {host.name}")
hostObj = VSphereESXiHost(id=host._moId, name=host.name)
self.graph.add_resource(clusterObj, hostObj)
# get vms for each host and read from the vm list
for vm in host.vm:
if vm._moId in self.instances_dict:
vmObj = self.instances_dict[vm._moId]
log.debug(
f"lookup vm - {vm._moId} - {vmObj.name} and assign to host {host._moId} - {host.name}"
)
self.graph.add_edge(hostObj, vmObj)
else:
log.warning(f"host {host._moId} - {host.name} reports {vm._moId} but instance not found")
@staticmethod
def add_config(config: Config) -> None:
config.add_config(VSphereConfig)
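# Illustrative sketch only: the guestState -> InstanceStatus mapping used in
# get_instances(), pulled out so the translation can be exercised without a
# vCenter connection. Any guest state passed in here would be hypothetical.
def _instance_status_for(guest_state: str) -> InstanceStatus:
    mapping: Dict[str, InstanceStatus] = {
        "pending": InstanceStatus.BUSY,
        "running": InstanceStatus.RUNNING,
        "shutting-down": InstanceStatus.BUSY,
        "terminated": InstanceStatus.TERMINATED,
        "stopping": InstanceStatus.BUSY,
        "notRunning": InstanceStatus.STOPPED,
    }
    return mapping.get(guest_state, InstanceStatus.UNKNOWN)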
|
PypiClean
|
/pensando_dss-1.62.2-py3-none-any.whl/pensando_dss/psm/model/auth_role_binding.py
|
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from pensando_dss.psm.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from pensando_dss.psm.model.api_object_meta import ApiObjectMeta
from pensando_dss.psm.model.auth_role_binding_spec import AuthRoleBindingSpec
globals()['ApiObjectMeta'] = ApiObjectMeta
globals()['AuthRoleBindingSpec'] = AuthRoleBindingSpec
class AuthRoleBinding(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute;
for a top-level var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute;
for a top-level var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'api_version': (str,), # noqa: E501
'kind': (str,), # noqa: E501
'meta': (ApiObjectMeta,), # noqa: E501
'spec': (AuthRoleBindingSpec,), # noqa: E501
'status': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'api_version': 'api-version', # noqa: E501
'kind': 'kind', # noqa: E501
'meta': 'meta', # noqa: E501
'spec': 'spec', # noqa: E501
'status': 'status', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""AuthRoleBinding - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
api_version (str): [optional] # noqa: E501
kind (str): [optional] # noqa: E501
meta (ApiObjectMeta): [optional] # noqa: E501
spec (AuthRoleBindingSpec): [optional] # noqa: E501
status ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): status part of role binding object.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
PypiClean
|
/trixie-0.1.2.tar.gz/trixie-0.1.2/homeassistant/components/climate/fritzbox.py
|
import logging
import requests
from homeassistant.components.fritzbox import DOMAIN as FRITZBOX_DOMAIN
from homeassistant.components.fritzbox import (
ATTR_STATE_DEVICE_LOCKED, ATTR_STATE_BATTERY_LOW, ATTR_STATE_LOCKED)
from homeassistant.components.climate import (
ATTR_OPERATION_MODE, ClimateDevice, STATE_ECO, STATE_HEAT, STATE_MANUAL,
STATE_OFF, STATE_ON, SUPPORT_OPERATION_MODE,
SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import (
ATTR_TEMPERATURE, PRECISION_HALVES, TEMP_CELSIUS)
DEPENDENCIES = ['fritzbox']
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE)
OPERATION_LIST = [STATE_HEAT, STATE_ECO, STATE_OFF, STATE_ON]
MIN_TEMPERATURE = 8
MAX_TEMPERATURE = 28
# special temperatures for on/off in Fritz!Box API (modified by pyfritzhome)
ON_API_TEMPERATURE = 127.0
OFF_API_TEMPERATURE = 126.5
ON_REPORT_SET_TEMPERATURE = 30.0
OFF_REPORT_SET_TEMPERATURE = 0.0
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Fritzbox smarthome thermostat platform."""
devices = []
fritz_list = hass.data[FRITZBOX_DOMAIN]
for fritz in fritz_list:
device_list = fritz.get_devices()
for device in device_list:
if device.has_thermostat:
devices.append(FritzboxThermostat(device, fritz))
add_devices(devices)
class FritzboxThermostat(ClimateDevice):
"""The thermostat class for Fritzbox smarthome thermostates."""
def __init__(self, device, fritz):
"""Initialize the thermostat."""
self._device = device
self._fritz = fritz
self._current_temperature = self._device.actual_temperature
self._target_temperature = self._device.target_temperature
self._comfort_temperature = self._device.comfort_temperature
self._eco_temperature = self._device.eco_temperature
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def available(self):
"""Return if thermostat is available."""
return self._device.present
@property
def name(self):
"""Return the name of the device."""
return self._device.name
@property
def temperature_unit(self):
"""Return the unit of measurement that is used."""
return TEMP_CELSIUS
@property
def precision(self):
"""Return precision 0.5."""
return PRECISION_HALVES
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self._target_temperature in (ON_API_TEMPERATURE,
OFF_API_TEMPERATURE):
return None
return self._target_temperature
def set_temperature(self, **kwargs):
"""Set new target temperature."""
if ATTR_OPERATION_MODE in kwargs:
operation_mode = kwargs.get(ATTR_OPERATION_MODE)
self.set_operation_mode(operation_mode)
elif ATTR_TEMPERATURE in kwargs:
temperature = kwargs.get(ATTR_TEMPERATURE)
self._device.set_target_temperature(temperature)
@property
def current_operation(self):
"""Return the current operation mode."""
if self._target_temperature == ON_API_TEMPERATURE:
return STATE_ON
if self._target_temperature == OFF_API_TEMPERATURE:
return STATE_OFF
if self._target_temperature == self._comfort_temperature:
return STATE_HEAT
if self._target_temperature == self._eco_temperature:
return STATE_ECO
return STATE_MANUAL
@property
def operation_list(self):
"""Return the list of available operation modes."""
return OPERATION_LIST
def set_operation_mode(self, operation_mode):
"""Set new operation mode."""
if operation_mode == STATE_HEAT:
self.set_temperature(temperature=self._comfort_temperature)
elif operation_mode == STATE_ECO:
self.set_temperature(temperature=self._eco_temperature)
elif operation_mode == STATE_OFF:
self.set_temperature(temperature=OFF_REPORT_SET_TEMPERATURE)
elif operation_mode == STATE_ON:
self.set_temperature(temperature=ON_REPORT_SET_TEMPERATURE)
@property
def min_temp(self):
"""Return the minimum temperature."""
return MIN_TEMPERATURE
@property
def max_temp(self):
"""Return the maximum temperature."""
return MAX_TEMPERATURE
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attrs = {
ATTR_STATE_DEVICE_LOCKED: self._device.device_lock,
ATTR_STATE_LOCKED: self._device.lock,
ATTR_STATE_BATTERY_LOW: self._device.battery_low,
}
return attrs
def update(self):
"""Update the data from the thermostat."""
try:
self._device.update()
self._current_temperature = self._device.actual_temperature
self._target_temperature = self._device.target_temperature
self._comfort_temperature = self._device.comfort_temperature
self._eco_temperature = self._device.eco_temperature
except requests.exceptions.HTTPError as ex:
_LOGGER.warning("Fritzbox connection error: %s", ex)
self._fritz.login()
|
PypiClean
|
/pied_piper_sdwan_sdk-1.0.51-py3-none-any.whl/openapi_client/model/ip_sec_security.py
|
import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from openapi_client.exceptions import ApiAttributeError
class IpSecSecurity(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute;
for a top-level var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute;
for a top-level var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('authentication_type',): {
'regex': {
'pattern': r'(ah-sha1-hmac|sha1-hmac)', # noqa: E501
},
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'authentication_type': (str,), # noqa: E501
'ip_sec_pairwise_keying': (str,), # noqa: E501
'rekey_time': (int,), # noqa: E501
'replay_window': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'authentication_type': 'authenticationType', # noqa: E501
'ip_sec_pairwise_keying': 'ipSecPairwiseKeying', # noqa: E501
'rekey_time': 'rekeyTime', # noqa: E501
'replay_window': 'replayWindow', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""IpSecSecurity - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
authentication_type (str): [optional] # noqa: E501
ip_sec_pairwise_keying (str): [optional] # noqa: E501
rekey_time (int): [optional] # noqa: E501
replay_window (int): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""IpSecSecurity - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
authentication_type (str): [optional] # noqa: E501
ip_sec_pairwise_keying (str): [optional] # noqa: E501
rekey_time (int): [optional] # noqa: E501
replay_window (int): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/scribes.edit.complete_like_eclipse-0.2.4.tar.gz/scribes.edit.complete_like_eclipse-0.2.4/scribes/edit/complete_like_eclipse/__init__.py
|
from gettext import gettext as _
from string import whitespace
from scribes.helpers import TriggerManager, Trigger, connect_external, connect_all
from signals import Signals
from IndexerProcessManager import Manager as IndexerProcessManager
from DictionaryManager import Manager as DictionaryManager
from ProcessCommunicator import Communicator as ProcessCommunicator
from TextExtractor import Extractor as TextExtractor
from BufferMonitor import Monitor as BufferMonitor
trigger = Trigger('complete-word', '<alt>slash',
'Eclipse-like word completion', 'Text Operations')
class Plugin(object):
def __init__(self, editor):
self.editor = editor
self.signals = Signals()
self.triggers = TriggerManager(editor)
connect_all(self, self.signals, self.triggers, textbuffer=self.editor.textbuffer)
self.block_word_reset = False
self.words = None
self.start_word = None
self.start_offset = None
self.indexer = IndexerProcessManager(self.signals.sender, editor)
self.dictionary_manager = DictionaryManager(self.signals.sender, editor)
self.communicator = ProcessCommunicator(self.signals.sender, editor)
self.extractor = TextExtractor(self.signals.sender, editor)
self.buffer_monitor = BufferMonitor(self.signals.sender, editor)
def unload(self):
self.signals.destroy.emit()
return False
def is_valid_character(self, c):
if c in whitespace:
return False
return c.isalpha() or c.isdigit() or (c in ("-", "_"))
def backward_to_word_begin(self, iterator):
if iterator.starts_line(): return iterator
iterator.backward_char()
while self.is_valid_character(iterator.get_char()):
iterator.backward_char()
if iterator.starts_line(): return iterator
iterator.forward_char()
return iterator
def forward_to_word_end(self, iterator):
if iterator.ends_line(): return iterator
if not self.is_valid_character(iterator.get_char()): return iterator
while self.is_valid_character(iterator.get_char()):
iterator.forward_char()
if iterator.ends_line(): return iterator
return iterator
def get_word_before_cursor(self):
iterator = self.editor.cursor.copy()
# If the cursor is in front of a valid character we ignore
# word completion.
if self.is_valid_character(iterator.get_char()):
return None, None
if iterator.starts_line():
return None, None
iterator.backward_char()
if not self.is_valid_character(iterator.get_char()):
return None, None
start = self.backward_to_word_begin(iterator.copy())
end = self.forward_to_word_end(iterator.copy())
word = self.editor.textbuffer.get_text(start, end).strip()
return word, start
def get_matches(self, string):
if not self.words:
return None
result = []
for word, count in self.words.iteritems():
if word != string and word.startswith(string):
result.append((word.encode('utf8'), count))
result.sort(key=lambda r: r[1], reverse=True)
return [r[0] for r in result]
@trigger
def cycle(self, *args):
word_to_complete, start = self.get_word_before_cursor()
if not word_to_complete:
return False
if not self.start_word or self.start_offset != start.get_offset():
self.start_word = word_to_complete
self.start_offset = start.get_offset()
matches = self.get_matches(self.start_word)
if matches:
idx = 0
try:
idx = matches.index(word_to_complete)
idx = (idx + 1) % len(matches)
except ValueError:
pass
if matches[idx] == word_to_complete:
self.editor.update_message(_("Word completed already"), "yes", 1)
return False
self.buffer_changed_handler.block()
end = self.editor.cursor.copy()
self.editor.textbuffer.delete(start, end)
self.editor.textbuffer.insert(start, matches[idx])
self.editor.response()
self.buffer_changed_handler.unblock()
else:
self.editor.update_message(_("No word to complete"), "no", 1)
return False
@Signals.dictionary
def word_list_updated(self, sender, words):
self.words = words
return False
@connect_external('textbuffer', 'changed')
def buffer_changed(self, *args):
self.start_word = None
self.start_iter = None
return False
|
PypiClean
|
/html_section-0.3.0-py3-none-any.whl/html_section/__init__.py
|
# Based on https://github.com/sphinx-doc/sphinx/blob/3.x/sphinx/writers/latex.py
#
# Copyright (c) 2007-2021 by the Sphinx team.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# stdlib
from typing import Any, Dict, List, Set, Type, cast
# 3rd party
import sphinx.transforms
from docutils import nodes
from sphinx import addnodes
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.environment import BuildEnvironment
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import clean_astext
from sphinx.writers.latex import LaTeXTranslator
__author__: str = "Dominic Davis-Foster"
__copyright__: str = "2021 Dominic Davis-Foster"
__license__: str = "BSD License"
__version__: str = "0.3.0"
__email__: str = "[email protected]"
__all__ = [
"html_section_indicator",
"HTMLSectionDirective",
"RemoveHTMLOnlySections",
"phantom_section_indicator",
"PhantomSectionDirective",
"RemovePhantomSections",
"visit_title",
"depart_title",
"setup",
]
logger = logging.getLogger(__name__)
class _BuildEnvironment(BuildEnvironment):
html_only_node_docnames: Set[str]
phantom_node_docnames: Set[str]
latex_only_node_docnames: Set[str]
def _traverse(node: nodes.Node, condition: Type[nodes.Node]) -> List[nodes.Node]:
# docutils >= 0.18 provides node.findall; fall back to node.traverse otherwise
if hasattr(node, "findall"):
return list(node.findall(condition))
else:
return node.traverse(condition)
def visit_title(translator: LaTeXTranslator, node: nodes.title) -> None:
"""
Visit a :class:`docutils.nodes.title` node.
:param translator:
:param node: The node itself.
"""
if isinstance(node.parent, addnodes.seealso): # pragma: no cover
# the environment already handles this
raise nodes.SkipNode
elif isinstance(node.parent, nodes.section):
if translator.this_is_the_title:
if len(node.children) != 1 and not isinstance(node.children[0], nodes.Text):
logger.warning(__("document title is not a single Text node"), location=node)
if not translator.elements["title"]:
# text needs to be escaped since it is inserted into
# the output literally
translator.elements["title"] = translator.escape(node.astext())
translator.this_is_the_title = 0
raise nodes.SkipNode
# This is all from the original visit_title function
else: # pragma: no cover
short = ''
if _traverse(node, nodes.image):
short = f"[{translator.escape(' '.join(clean_astext(node).split()))}]"
try:
translator.body.append(f"\\{translator.sectionnames[translator.sectionlevel]}{short}{{")
except IndexError:
# just use "subparagraph", it's not numbered anyway
translator.body.append(f"\\{translator.sectionnames[-1]}{short}{{")
# breakpoint()
translator.context.append(f"}}\n{translator.hypertarget_to(node.parent)}")
# This is all from the original visit_title function
elif isinstance(node.parent, nodes.topic): # pragma: no cover
translator.body.append(r"\sphinxstyletopictitle{")
translator.context.append('}\n')
elif isinstance(node.parent, nodes.sidebar): # pragma: no cover
translator.body.append(r"\sphinxstylesidebartitle{")
translator.context.append('}\n')
elif isinstance(node.parent, nodes.Admonition): # pragma: no cover
translator.body.append('{')
translator.context.append('}\n')
elif isinstance(node.parent, nodes.table): # pragma: no cover
# Redirect body output until title is finished.
translator.pushbody([])
else: # pragma: no cover
logger.warning(
__("encountered title node not in section, topic, table, admonition or sidebar"),
location=node,
)
translator.body.append("\\sphinxstyleothertitle{")
translator.context.append('}\n')
translator.in_title = 1
def depart_title(
translator: LaTeXTranslator,
node: nodes.title,
) -> None: # pragma: no cover
"""
Depart a :class:`docutils.nodes.title` node.
:param translator:
:param node: The node itself.
"""
translator.in_title = 0
if isinstance(node.parent, nodes.table):
assert translator.table is not None
translator.table.caption = translator.popbody()
else:
translator.body.append(translator.context.pop())
class html_section_indicator(nodes.paragraph):
"""
Docutils node to mark sections as being HTML only.
"""
class HTMLSectionDirective(SphinxDirective):
"""
Sphinx directive for marking a section as being HTML-only.
"""
def run(self) -> List[nodes.Node]: # noqa: D102
return [html_section_indicator()]
class RemoveHTMLOnlySections(sphinx.transforms.SphinxTransform):
"""
Sphinx transform to mark the node, its parent and siblings as being HTML-only.
"""
default_priority = 999
def apply(self, **kwargs) -> None: # noqa: D102
env = cast(_BuildEnvironment, self.env)
if not hasattr(env, "html_only_node_docnames"):
env.html_only_node_docnames = set()
if self.app.builder.format.lower() == "html":
return
for node in _traverse(self.document, html_section_indicator):
assert node.parent is not None
parent = cast(nodes.Element, node.parent)
env.html_only_node_docnames.add(env.docname)
parent.replace_self(parent.children[parent.children.index(node):])
class phantom_section_indicator(nodes.paragraph):
"""
Docutils node to mark a section as being a phantom section.
"""
class PhantomSectionDirective(SphinxDirective):
"""
Sphinx directive for marking a section as being a phantom section.
"""
def run(self) -> List[nodes.Node]: # noqa: D102
return [phantom_section_indicator()]
class RemovePhantomSections(sphinx.transforms.SphinxTransform):
"""
Sphinx transform to mark the node, its parent and siblings as being a phantom section.
"""
default_priority = 999
def apply(self, **kwargs) -> None: # noqa: D102
env = cast(_BuildEnvironment, self.env)
if not hasattr(env, "phantom_node_docnames"):
env.phantom_node_docnames = set()
for node in _traverse(self.document, phantom_section_indicator):
assert node.parent is not None
parent = cast(nodes.Element, node.parent)
env.phantom_node_docnames.add(env.docname)
parent.replace_self(parent.children[parent.children.index(node):])
class latex_section_indicator(nodes.paragraph):
"""
Docutils node to mark sections as being LaTeX only.
.. versionadded:: 0.2.0
"""
class LaTeXSectionDirective(SphinxDirective):
"""
Sphinx directive for marking a section as being LaTeX-only.
.. versionadded:: 0.2.0
"""
def run(self) -> List[nodes.Node]:
return [latex_section_indicator()]
class RemoveLaTeXOnlySections(sphinx.transforms.SphinxTransform):
"""
Sphinx transform to mark the node, its parent and siblings as being LaTeX-only.
.. versionadded:: 0.2.0
"""
default_priority = 999
def apply(self, **kwargs) -> None:
env = cast(_BuildEnvironment, self.env)
if not hasattr(env, "latex_only_node_docnames"):
env.latex_only_node_docnames = set()
if cast(Builder, self.app.builder).format.lower() == "latex":
return
for node in _traverse(self.document, latex_section_indicator):
assert node.parent is not None
parent = cast(nodes.Element, node.parent)
env.latex_only_node_docnames.add(env.docname)
parent.replace_self(parent.children[parent.children.index(node):])
def purge_outdated(
app: Sphinx,
env: BuildEnvironment,
added: List[str],
changed: List[str],
removed: List[str],
) -> List[str]:
return [
*getattr(env, "html_only_node_docnames", []),
*getattr(env, "phantom_node_docnames", []),
*getattr(env, "latex_only_node_docnames", []),
]
def setup(app: Sphinx) -> Dict[str, Any]:
"""
Setup Sphinx Extension.
:param app:
"""
app.add_directive("html-section", HTMLSectionDirective)
app.add_directive("phantom-section", PhantomSectionDirective)
app.add_directive("latex-section", LaTeXSectionDirective)
app.add_transform(RemoveHTMLOnlySections)
app.add_transform(RemovePhantomSections)
app.add_transform(RemoveLaTeXOnlySections)
app.add_node(nodes.title, override=True, latex=(visit_title, depart_title))
app.connect("env-get-outdated", purge_outdated)
return {"version": __version__}
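# Usage sketch (hedged): assuming this extension module is listed in the
# ``extensions`` setting of ``conf.py`` (the exact dotted module path is
# project-specific), a document can then mark a section with one of the
# registered directives, e.g.
#
#     .. html-section::
#
# marks the enclosing section as HTML-only, while ``.. latex-section::`` and
# ``.. phantom-section::`` mark it as LaTeX-only or as a phantom section.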
|
PypiClean
|
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flasharray/FA_2_9/models/software_installation_steps_response.py
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_9 import models
class SoftwareInstallationStepsResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[SoftwareInstallationStep]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.SoftwareInstallationStep]
):
"""
Keyword args:
items (list[SoftwareInstallationStep])
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SoftwareInstallationStepsResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SoftwareInstallationStepsResponse`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SoftwareInstallationStepsResponse`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SoftwareInstallationStepsResponse`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SoftwareInstallationStepsResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SoftwareInstallationStepsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
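# Minimal usage sketch (values are illustrative): attribute access is restricted
# to the keys declared in ``attribute_map`` and the model round-trips via to_dict().
#
#     resp = SoftwareInstallationStepsResponse(items=[])
#     resp.to_dict()         # {'items': []}
#     list(resp.keys())      # ['items']
#     resp['bogus'] = 1      # raises KeyError: invalid key for this model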
|
PypiClean
|
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flashblade/FB_2_8/models/link_aggregation_group_response.py
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_8 import models
class LinkAggregationGroupResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[LinkAggregationGroup]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.LinkAggregationGroup]
):
"""
Keyword args:
items (list[LinkAggregationGroup]): A list of lag objects.
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `LinkAggregationGroupResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(LinkAggregationGroupResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LinkAggregationGroupResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
PypiClean
|
/eventstore_grpc-0.1.1.tar.gz/eventstore_grpc-0.1.1/src/eventstore_grpc/proto/persistent_pb2_grpc.py
|
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from eventstore_grpc.proto import persistent_pb2 as persistent__pb2
from eventstore_grpc.proto import shared_pb2 as shared__pb2
class PersistentSubscriptionsStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Create = channel.unary_unary(
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/Create",
request_serializer=persistent__pb2.CreateReq.SerializeToString,
response_deserializer=persistent__pb2.CreateResp.FromString,
)
self.Update = channel.unary_unary(
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/Update",
request_serializer=persistent__pb2.UpdateReq.SerializeToString,
response_deserializer=persistent__pb2.UpdateResp.FromString,
)
self.Delete = channel.unary_unary(
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/Delete",
request_serializer=persistent__pb2.DeleteReq.SerializeToString,
response_deserializer=persistent__pb2.DeleteResp.FromString,
)
self.Read = channel.stream_stream(
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/Read",
request_serializer=persistent__pb2.ReadReq.SerializeToString,
response_deserializer=persistent__pb2.ReadResp.FromString,
)
self.GetInfo = channel.unary_unary(
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/GetInfo",
request_serializer=persistent__pb2.GetInfoReq.SerializeToString,
response_deserializer=persistent__pb2.GetInfoResp.FromString,
)
self.ReplayParked = channel.unary_unary(
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/ReplayParked",
request_serializer=persistent__pb2.ReplayParkedReq.SerializeToString,
response_deserializer=persistent__pb2.ReplayParkedResp.FromString,
)
self.List = channel.unary_unary(
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/List",
request_serializer=persistent__pb2.ListReq.SerializeToString,
response_deserializer=persistent__pb2.ListResp.FromString,
)
self.RestartSubsystem = channel.unary_unary(
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/RestartSubsystem",
request_serializer=shared__pb2.Empty.SerializeToString,
response_deserializer=shared__pb2.Empty.FromString,
)
class PersistentSubscriptionsServicer(object):
"""Missing associated documentation comment in .proto file."""
def Create(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def Update(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def Delete(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def Read(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetInfo(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ReplayParked(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def List(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def RestartSubsystem(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_PersistentSubscriptionsServicer_to_server(servicer, server):
rpc_method_handlers = {
"Create": grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=persistent__pb2.CreateReq.FromString,
response_serializer=persistent__pb2.CreateResp.SerializeToString,
),
"Update": grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=persistent__pb2.UpdateReq.FromString,
response_serializer=persistent__pb2.UpdateResp.SerializeToString,
),
"Delete": grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=persistent__pb2.DeleteReq.FromString,
response_serializer=persistent__pb2.DeleteResp.SerializeToString,
),
"Read": grpc.stream_stream_rpc_method_handler(
servicer.Read,
request_deserializer=persistent__pb2.ReadReq.FromString,
response_serializer=persistent__pb2.ReadResp.SerializeToString,
),
"GetInfo": grpc.unary_unary_rpc_method_handler(
servicer.GetInfo,
request_deserializer=persistent__pb2.GetInfoReq.FromString,
response_serializer=persistent__pb2.GetInfoResp.SerializeToString,
),
"ReplayParked": grpc.unary_unary_rpc_method_handler(
servicer.ReplayParked,
request_deserializer=persistent__pb2.ReplayParkedReq.FromString,
response_serializer=persistent__pb2.ReplayParkedResp.SerializeToString,
),
"List": grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=persistent__pb2.ListReq.FromString,
response_serializer=persistent__pb2.ListResp.SerializeToString,
),
"RestartSubsystem": grpc.unary_unary_rpc_method_handler(
servicer.RestartSubsystem,
request_deserializer=shared__pb2.Empty.FromString,
response_serializer=shared__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"event_store.client.persistent_subscriptions.PersistentSubscriptions",
rpc_method_handlers,
)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class PersistentSubscriptions(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Create(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/Create",
persistent__pb2.CreateReq.SerializeToString,
persistent__pb2.CreateResp.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def Update(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/Update",
persistent__pb2.UpdateReq.SerializeToString,
persistent__pb2.UpdateResp.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def Delete(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/Delete",
persistent__pb2.DeleteReq.SerializeToString,
persistent__pb2.DeleteResp.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def Read(
request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.stream_stream(
request_iterator,
target,
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/Read",
persistent__pb2.ReadReq.SerializeToString,
persistent__pb2.ReadResp.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def GetInfo(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/GetInfo",
persistent__pb2.GetInfoReq.SerializeToString,
persistent__pb2.GetInfoResp.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def ReplayParked(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/ReplayParked",
persistent__pb2.ReplayParkedReq.SerializeToString,
persistent__pb2.ReplayParkedResp.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def List(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/List",
persistent__pb2.ListReq.SerializeToString,
persistent__pb2.ListResp.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def RestartSubsystem(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/event_store.client.persistent_subscriptions.PersistentSubscriptions/RestartSubsystem",
shared__pb2.Empty.SerializeToString,
shared__pb2.Empty.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
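# Minimal client sketch (the target address is illustrative and assumes a
# reachable EventStoreDB node; no RPC is issued by merely creating the stub):
#
#     channel = grpc.insecure_channel("localhost:2113")
#     stub = PersistentSubscriptionsStub(channel)
#     # e.g. stub.GetInfo(persistent__pb2.GetInfoReq()) against a live server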
|
PypiClean
|
/python_microscopy-20.12.8-cp36-cp36m-win_amd64.whl/PYME/Acquire/Protocols/prebleach642.py
|
##################
# prebleach642.py
#
# Copyright David Baddeley, 2009
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
#import all the stuff to make this work
from PYME.Acquire.protocol import *
import numpy
#define a list of tasks, where T(when, what, *args) creates a new task
#when is the frame number, what is a function to be called, and *args are any
#additional arguments
taskList = [
T(-1, scope.turnAllLasersOff),
T(-1, SetCameraShutter, False),
T(-1, scope.joystick.Enable, False),
#T(-1, SetEMGain,150),
T(20, SetCameraShutter, True),
#T(20, scope.filterWheel.SetFilterPos, "ND4"),
T(20, scope.l642.SetPower, .01),
T(21, scope.l642.TurnOn),
T(58, scope.l642.TurnOff),
T(60, SetEMGain,0),
T(61, scope.l642.TurnOn),
T(61, scope.l642.SetPower, 1),
T(200, SetEMGain,scope.cam.DefaultEMGain),
T(210, MainFrame.pan_spool.OnBAnalyse, None),
T(maxint, scope.turnAllLasersOff),
T(maxint, scope.l642.SetPower, .01),
T(maxint, scope.joystick.Enable, True),
]
#optional - metadata entries
metaData = [
('Protocol.DarkFrameRange', (0, 20)),
('Protocol.DataStartsAt', 201),
('Protocol.PrebleachFrames', (21, 58)),
('Protocol.BleachFrames', (61,200)),
]
#optional - pre-flight check
#a list of checks which should be performed prior to launching the protocol
#syntax: C(expression to evaluate (quoted, should have boolean return), message to display on failure),
preflight = [
    C('scope.cam.GetEMGain() == scope.cam.DefaultEMGain', 'Was expecting an initial e.m. gain of %d' % scope.cam.DefaultEMGain),
C('scope.cam.GetROIX1() > 1', 'Looks like no ROI has been set'),
C('scope.cam.GetIntegTime() < .06', 'Camera integration time may be too long'),
]
#must be defined for protocol to be discovered
PROTOCOL = TaskListProtocol(taskList, metaData, preflight)
PROTOCOL_STACK = ZStackTaskListProtocol(taskList, 101, 100, metaData, preflight, randomise = False)
|
PypiClean
|
/openapi_python_embyapi-4.8.0.36.tar.gz/openapi_python_embyapi-4.8.0.36/openapi_python_embyapi/models/transcoding_vp_step_info.py
|
from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.transcoding_vp_step_types import TranscodingVpStepTypes
from ..types import UNSET, Unset
T = TypeVar("T", bound="TranscodingVpStepInfo")
@attr.s(auto_attribs=True)
class TranscodingVpStepInfo:
"""
Attributes:
step_type (Union[Unset, TranscodingVpStepTypes]):
step_type_name (Union[Unset, str]):
hardware_context_name (Union[Unset, str]):
is_hardware_context (Union[Unset, bool]):
name (Union[Unset, str]):
short (Union[Unset, str]):
ffmpeg_name (Union[Unset, str]):
ffmpeg_description (Union[Unset, str]):
ffmpeg_options (Union[Unset, str]):
param (Union[Unset, str]):
param_short (Union[Unset, str]):
"""
step_type: Union[Unset, TranscodingVpStepTypes] = UNSET
step_type_name: Union[Unset, str] = UNSET
hardware_context_name: Union[Unset, str] = UNSET
is_hardware_context: Union[Unset, bool] = UNSET
name: Union[Unset, str] = UNSET
short: Union[Unset, str] = UNSET
ffmpeg_name: Union[Unset, str] = UNSET
ffmpeg_description: Union[Unset, str] = UNSET
ffmpeg_options: Union[Unset, str] = UNSET
param: Union[Unset, str] = UNSET
param_short: Union[Unset, str] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
step_type: Union[Unset, str] = UNSET
if not isinstance(self.step_type, Unset):
step_type = self.step_type.value
step_type_name = self.step_type_name
hardware_context_name = self.hardware_context_name
is_hardware_context = self.is_hardware_context
name = self.name
short = self.short
ffmpeg_name = self.ffmpeg_name
ffmpeg_description = self.ffmpeg_description
ffmpeg_options = self.ffmpeg_options
param = self.param
param_short = self.param_short
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if step_type is not UNSET:
field_dict["StepType"] = step_type
if step_type_name is not UNSET:
field_dict["StepTypeName"] = step_type_name
if hardware_context_name is not UNSET:
field_dict["HardwareContextName"] = hardware_context_name
if is_hardware_context is not UNSET:
field_dict["IsHardwareContext"] = is_hardware_context
if name is not UNSET:
field_dict["Name"] = name
if short is not UNSET:
field_dict["Short"] = short
if ffmpeg_name is not UNSET:
field_dict["FfmpegName"] = ffmpeg_name
if ffmpeg_description is not UNSET:
field_dict["FfmpegDescription"] = ffmpeg_description
if ffmpeg_options is not UNSET:
field_dict["FfmpegOptions"] = ffmpeg_options
if param is not UNSET:
field_dict["Param"] = param
if param_short is not UNSET:
field_dict["ParamShort"] = param_short
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
_step_type = d.pop("StepType", UNSET)
step_type: Union[Unset, TranscodingVpStepTypes]
if isinstance(_step_type, Unset):
step_type = UNSET
else:
step_type = TranscodingVpStepTypes(_step_type)
step_type_name = d.pop("StepTypeName", UNSET)
hardware_context_name = d.pop("HardwareContextName", UNSET)
is_hardware_context = d.pop("IsHardwareContext", UNSET)
name = d.pop("Name", UNSET)
short = d.pop("Short", UNSET)
ffmpeg_name = d.pop("FfmpegName", UNSET)
ffmpeg_description = d.pop("FfmpegDescription", UNSET)
ffmpeg_options = d.pop("FfmpegOptions", UNSET)
param = d.pop("Param", UNSET)
param_short = d.pop("ParamShort", UNSET)
transcoding_vp_step_info = cls(
step_type=step_type,
step_type_name=step_type_name,
hardware_context_name=hardware_context_name,
is_hardware_context=is_hardware_context,
name=name,
short=short,
ffmpeg_name=ffmpeg_name,
ffmpeg_description=ffmpeg_description,
ffmpeg_options=ffmpeg_options,
param=param,
param_short=param_short,
)
transcoding_vp_step_info.additional_properties = d
return transcoding_vp_step_info
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
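# Minimal round-trip sketch (the key/value pairs are illustrative): known keys map
# to typed attributes, anything else is kept in ``additional_properties``.
#
#     info = TranscodingVpStepInfo.from_dict({"Name": "scale", "FfmpegName": "scale"})
#     info.to_dict()          # {'Name': 'scale', 'FfmpegName': 'scale'}
#     info.additional_keys    # [] (no unrecognized keys were passed)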
|
PypiClean
|
/plonetheme.cultureCab-0.4.tar.gz/plonetheme.cultureCab-0.4/Paste-1.7.4-py2.4.egg/paste/registry.py
|
import sys
import paste.util.threadinglocal as threadinglocal
__all__ = ['StackedObjectProxy', 'RegistryManager', 'StackedObjectRestorer',
'restorer']
class NoDefault(object): pass
class StackedObjectProxy(object):
"""Track an object instance internally using a stack
The StackedObjectProxy proxies access to an object internally using a
stacked thread-local. This makes it safe for complex WSGI environments
where access to the object may be desired in multiple places without
having to pass the actual object around.
New objects are added to the top of the stack with _push_object while
objects can be removed with _pop_object.
"""
def __init__(self, default=NoDefault, name="Default"):
"""Create a new StackedObjectProxy
        If a default is given, it's used in every thread if no other object
has been pushed on.
"""
self.__dict__['____name__'] = name
self.__dict__['____local__'] = threadinglocal.local()
if default is not NoDefault:
self.__dict__['____default_object__'] = default
def __dir__(self):
"""Return a list of the StackedObjectProxy's and proxied
object's (if one exists) names.
"""
dir_list = dir(self.__class__) + self.__dict__.keys()
try:
dir_list.extend(dir(self._current_obj()))
except TypeError:
pass
dir_list.sort()
return dir_list
def __getattr__(self, attr):
return getattr(self._current_obj(), attr)
def __setattr__(self, attr, value):
setattr(self._current_obj(), attr, value)
def __delattr__(self, name):
delattr(self._current_obj(), name)
def __getitem__(self, key):
return self._current_obj()[key]
def __setitem__(self, key, value):
self._current_obj()[key] = value
def __delitem__(self, key):
del self._current_obj()[key]
def __call__(self, *args, **kw):
return self._current_obj()(*args, **kw)
def __repr__(self):
try:
return repr(self._current_obj())
except (TypeError, AttributeError):
return '<%s.%s object at 0x%x>' % (self.__class__.__module__,
self.__class__.__name__,
id(self))
def __iter__(self):
return iter(self._current_obj())
def __len__(self):
return len(self._current_obj())
def __contains__(self, key):
return key in self._current_obj()
def __nonzero__(self):
return bool(self._current_obj())
def _current_obj(self):
"""Returns the current active object being proxied to
In the event that no object was pushed, the default object if
provided will be used. Otherwise, a TypeError will be raised.
"""
try:
objects = self.____local__.objects
except AttributeError:
objects = None
if objects:
return objects[-1]
else:
obj = self.__dict__.get('____default_object__', NoDefault)
if obj is not NoDefault:
return obj
else:
raise TypeError(
'No object (name: %s) has been registered for this '
'thread' % self.____name__)
def _push_object(self, obj):
"""Make ``obj`` the active object for this thread-local.
This should be used like:
.. code-block:: python
obj = yourobject()
module.glob = StackedObjectProxy()
module.glob._push_object(obj)
try:
... do stuff ...
finally:
                module.glob._pop_object(obj)
"""
try:
self.____local__.objects.append(obj)
except AttributeError:
self.____local__.objects = []
self.____local__.objects.append(obj)
def _pop_object(self, obj=None):
"""Remove a thread-local object.
If ``obj`` is given, it is checked against the popped object and an
error is emitted if they don't match.
"""
try:
popped = self.____local__.objects.pop()
if obj and popped is not obj:
raise AssertionError(
'The object popped (%s) is not the same as the object '
'expected (%s)' % (popped, obj))
except AttributeError:
raise AssertionError('No object has been registered for this thread')
def _object_stack(self):
"""Returns all of the objects stacked in this container
(Might return [] if there are none)
"""
try:
return self.____local__.objects[:]
        except (AssertionError, AttributeError):
return []
# The following methods will be swapped for their original versions by
# StackedObjectRestorer when restoration is enabled. The original
# functions (e.g. _current_obj) will be available at _current_obj_orig
def _current_obj_restoration(self):
request_id = restorer.in_restoration()
if request_id:
return restorer.get_saved_proxied_obj(self, request_id)
return self._current_obj_orig()
_current_obj_restoration.__doc__ = \
('%s\n(StackedObjectRestorer restoration enabled)' % \
_current_obj.__doc__)
def _push_object_restoration(self, obj):
if not restorer.in_restoration():
self._push_object_orig(obj)
_push_object_restoration.__doc__ = \
('%s\n(StackedObjectRestorer restoration enabled)' % \
_push_object.__doc__)
def _pop_object_restoration(self, obj=None):
if not restorer.in_restoration():
self._pop_object_orig(obj)
_pop_object_restoration.__doc__ = \
('%s\n(StackedObjectRestorer restoration enabled)' % \
_pop_object.__doc__)
class Registry(object):
"""Track objects and stacked object proxies for removal
The Registry object is instantiated a single time for the request no
matter how many times the RegistryManager is used in a WSGI stack. Each
RegistryManager must call ``prepare`` before continuing the call to
start a new context for object registering.
Each context is tracked with a dict inside a list. The last list
element is the currently executing context. Each context dict is keyed
by the id of the StackedObjectProxy instance being proxied, the value
is a tuple of the StackedObjectProxy instance and the object being
tracked.
"""
def __init__(self):
"""Create a new Registry object
``prepare`` must still be called before this Registry object can be
used to register objects.
"""
self.reglist = []
def prepare(self):
"""Used to create a new registry context
Anytime a new RegistryManager is called, ``prepare`` needs to be
called on the existing Registry object. This sets up a new context
for registering objects.
"""
self.reglist.append({})
def register(self, stacked, obj):
"""Register an object with a StackedObjectProxy"""
myreglist = self.reglist[-1]
stacked_id = id(stacked)
if stacked_id in myreglist:
stacked._pop_object(myreglist[stacked_id][1])
del myreglist[stacked_id]
stacked._push_object(obj)
myreglist[stacked_id] = (stacked, obj)
def multiregister(self, stacklist):
"""Register a list of tuples
Similar call semantics as register, except this registers
multiple objects at once.
Example::
registry.multiregister([(sop, obj), (anothersop, anotherobj)])
"""
myreglist = self.reglist[-1]
for stacked, obj in stacklist:
stacked_id = id(stacked)
if stacked_id in myreglist:
stacked._pop_object(myreglist[stacked_id][1])
del myreglist[stacked_id]
stacked._push_object(obj)
myreglist[stacked_id] = (stacked, obj)
# Replace now does the same thing as register
replace = register
def cleanup(self):
"""Remove all objects from all StackedObjectProxy instances that
were tracked at this Registry context"""
for stacked, obj in self.reglist[-1].itervalues():
stacked._pop_object(obj)
self.reglist.pop()
class RegistryManager(object):
"""Creates and maintains a Registry context
RegistryManager creates a new registry context for the registration of
StackedObjectProxy instances. Multiple RegistryManager's can be in a
WSGI stack and will manage the context so that the StackedObjectProxies
always proxy to the proper object.
The object being registered can be any object sub-class, list, or dict.
Registering objects is done inside a WSGI application under the
RegistryManager instance, using the ``environ['paste.registry']``
object which is a Registry instance.
"""
def __init__(self, application, streaming=False):
self.application = application
self.streaming = streaming
def __call__(self, environ, start_response):
app_iter = None
reg = environ.setdefault('paste.registry', Registry())
reg.prepare()
if self.streaming:
return self.streaming_iter(reg, environ, start_response)
try:
app_iter = self.application(environ, start_response)
except Exception, e:
# Regardless of if the content is an iterable, generator, list
# or tuple, we clean-up right now. If its an iterable/generator
# care should be used to ensure the generator has its own ref
# to the actual object
if environ.get('paste.evalexception'):
# EvalException is present in the WSGI stack
expected = False
for expect in environ.get('paste.expected_exceptions', []):
if isinstance(e, expect):
expected = True
if not expected:
# An unexpected exception: save state for EvalException
restorer.save_registry_state(environ)
reg.cleanup()
raise
except:
# Save state for EvalException if it's present
if environ.get('paste.evalexception'):
restorer.save_registry_state(environ)
reg.cleanup()
raise
else:
reg.cleanup()
return app_iter
def streaming_iter(self, reg, environ, start_response):
try:
for item in self.application(environ, start_response):
yield item
except Exception, e:
# Regardless of if the content is an iterable, generator, list
# or tuple, we clean-up right now. If its an iterable/generator
# care should be used to ensure the generator has its own ref
# to the actual object
if environ.get('paste.evalexception'):
# EvalException is present in the WSGI stack
expected = False
for expect in environ.get('paste.expected_exceptions', []):
if isinstance(e, expect):
expected = True
if not expected:
# An unexpected exception: save state for EvalException
restorer.save_registry_state(environ)
reg.cleanup()
raise
except:
# Save state for EvalException if it's present
if environ.get('paste.evalexception'):
restorer.save_registry_state(environ)
reg.cleanup()
raise
else:
reg.cleanup()
class StackedObjectRestorer(object):
"""Track StackedObjectProxies and their proxied objects for automatic
restoration within EvalException's interactive debugger.
An instance of this class tracks all StackedObjectProxy state in existence
when unexpected exceptions are raised by WSGI applications housed by
EvalException and RegistryManager. Like EvalException, this information is
stored for the life of the process.
When an unexpected exception occurs and EvalException is present in the
WSGI stack, save_registry_state is intended to be called to store the
Registry state and enable automatic restoration on all currently registered
StackedObjectProxies.
With restoration enabled, those StackedObjectProxies' _current_obj
(overwritten by _current_obj_restoration) method's strategy is modified:
it will return its appropriate proxied object from the restorer when
a restoration context is active in the current thread.
The StackedObjectProxies' _push/pop_object methods strategies are also
changed: they no-op when a restoration context is active in the current
thread (because the pushing/popping work is all handled by the
Registry/restorer).
The request's Registry objects' reglists are restored from the restorer
when a restoration context begins, enabling the Registry methods to work
while their changes are tracked by the restorer.
The overhead of enabling restoration is negligible (another threadlocal
access for the changed StackedObjectProxy methods) for normal use outside
of a restoration context, but worth mentioning when combined with
StackedObjectProxies normal overhead. Once enabled it does not turn off,
however:
o Enabling restoration only occurs after an unexpected exception is
detected. The server is likely to be restarted shortly after the exception
is raised to fix the cause
o StackedObjectRestorer is only enabled when EvalException is enabled (not
on a production server) and RegistryManager exists in the middleware
stack"""
def __init__(self):
# Registries and their saved reglists by request_id
self.saved_registry_states = {}
self.restoration_context_id = threadinglocal.local()
def save_registry_state(self, environ):
"""Save the state of this request's Registry (if it hasn't already been
saved) to the saved_registry_states dict, keyed by the request's unique
identifier"""
registry = environ.get('paste.registry')
if not registry or not len(registry.reglist) or \
self.get_request_id(environ) in self.saved_registry_states:
# No Registry, no state to save, or this request's state has
# already been saved
return
self.saved_registry_states[self.get_request_id(environ)] = \
(registry, registry.reglist[:])
# Tweak the StackedObjectProxies we want to save state for -- change
# their methods to act differently when a restoration context is active
# in the current thread
for reglist in registry.reglist:
for stacked, obj in reglist.itervalues():
self.enable_restoration(stacked)
def get_saved_proxied_obj(self, stacked, request_id):
"""Retrieve the saved object proxied by the specified
StackedObjectProxy for the request identified by request_id"""
# All state for the request identified by request_id
reglist = self.saved_registry_states[request_id][1]
# The top of the stack was current when the exception occurred
stack_level = len(reglist) - 1
stacked_id = id(stacked)
while True:
if stack_level < 0:
# Nothing registered: Call _current_obj_orig to raise a
# TypeError
return stacked._current_obj_orig()
context = reglist[stack_level]
if stacked_id in context:
break
# This StackedObjectProxy may not have been registered by the
# RegistryManager that was active when the exception was raised --
# continue searching down the stack until it's found
stack_level -= 1
return context[stacked_id][1]
def enable_restoration(self, stacked):
"""Replace the specified StackedObjectProxy's methods with their
respective restoration versions.
_current_obj_restoration forces recovery of the saved proxied object
when a restoration context is active in the current thread.
_push/pop_object_restoration avoid pushing/popping data
(pushing/popping is only done at the Registry level) when a restoration
context is active in the current thread"""
if '_current_obj_orig' in stacked.__dict__:
# Restoration already enabled
return
for func_name in ('_current_obj', '_push_object', '_pop_object'):
orig_func = getattr(stacked, func_name)
restoration_func = getattr(stacked, func_name + '_restoration')
stacked.__dict__[func_name + '_orig'] = orig_func
stacked.__dict__[func_name] = restoration_func
def get_request_id(self, environ):
"""Return a unique identifier for the current request"""
from paste.evalexception.middleware import get_debug_count
return get_debug_count(environ)
def restoration_begin(self, request_id):
"""Enable a restoration context in the current thread for the specified
request_id"""
if request_id in self.saved_registry_states:
# Restore the old Registry object's state
registry, reglist = self.saved_registry_states[request_id]
registry.reglist = reglist
self.restoration_context_id.request_id = request_id
def restoration_end(self):
"""Register a restoration context as finished, if one exists"""
try:
del self.restoration_context_id.request_id
except AttributeError:
pass
def in_restoration(self):
"""Determine if a restoration context is active for the current thread.
Returns the request_id it's active for if so, otherwise False"""
return getattr(self.restoration_context_id, 'request_id', False)
restorer = StackedObjectRestorer()
# Paste Deploy entry point
def make_registry_manager(app, global_conf):
return RegistryManager(app)
make_registry_manager.__doc__ = RegistryManager.__doc__
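# Minimal usage sketch (the proxy and application names are illustrative): a WSGI
# app wrapped in RegistryManager registers per-request objects on
# environ['paste.registry'], and the StackedObjectProxy then resolves to them for
# the duration of the request.
#
#     myglobal = StackedObjectProxy(name="myglobal")
#
#     def app(environ, start_response):
#         environ['paste.registry'].register(myglobal, {'user': 'anonymous'})
#         start_response('200 OK', [('Content-type', 'text/plain')])
#         return [str(myglobal['user'])]
#
#     wsgi_app = RegistryManager(app)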
|
PypiClean
|
/cognite_wells_sdk-0.18.0-py3-none-any.whl/cognite/well_model/client/models/trajectory_rows.py
|
import math
from pandas import DataFrame
from cognite.well_model.client.utils._auxiliary import to_camel_case
from cognite.well_model.models import TrajectoryData
class TrajectoryRows:
"""Custom data class for displaying trajectory data as data frames."""
def __init__(self, data: TrajectoryData):
self.sequence_external_id = data.source.sequence_external_id
self.wellbore_asset_external_id = data.wellbore_asset_external_id
self.source = data.source
self.inclination_unit = data.inclination_unit
self.azimuth_unit = data.azimuth_unit
self.type = data.type
self.measured_depth_unit = data.measured_depth_unit
self.true_vertical_depth_unit = data.true_vertical_depth_unit
self.equivalent_departure_unit = data.equivalent_departure_unit
self.rows = data.rows
    def to_pandas(self, camel_case=True) -> DataFrame:
        """Generate a pandas DataFrame from the trajectory rows.
        Args:
            camel_case (bool): Convert column names to camelCase (default True).
        Returns:
            DataFrame: The trajectory rows as a DataFrame.
        """
        column_names = [
            "measured_depth",
            "true_vertical_depth",
            "azimuth",
            "inclination",
            "north_offset",
            "east_offset",
            "equivalent_departure",
            "northing",
            "easting",
            "dogleg_severity",
        ]
        if camel_case:
            column_names = [to_camel_case(x) for x in column_names]
row_values = []
for r in self.rows:
row = [
r.measured_depth,
r.true_vertical_depth,
r.azimuth,
r.inclination,
r.north_offset,
r.east_offset,
r.equivalent_departure,
r.northing,
r.easting,
r.dogleg_severity,
]
row = [x if x is not None else math.nan for x in row]
row_values.append(row)
# TODO: set index to MD and remove MD from values?
return DataFrame(
row_values,
columns=column_names,
)
def _repr_html_(self):
return self.to_pandas()._repr_html_()
def __getitem__(self, item):
return self.rows[item]
def __iter__(self):
return self.rows.__iter__()
def __repr__(self):
return_string = [object.__repr__(d) for d in self.rows]
return f"[{', '.join(r for r in return_string)}]"
def __len__(self):
return self.rows.__len__()
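# Minimal usage sketch (assumes ``data`` is a TrajectoryData instance obtained
# from the wells API):
#
#     rows = TrajectoryRows(data)
#     df = rows.to_pandas()
#     len(rows), list(df.columns)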
|
PypiClean
|
/django-utf8field-1.0.0.tar.gz/django-utf8field-1.0.0/README.rst
|
==================
Django UTF-8 Field
==================
.. image:: https://travis-ci.org/megasnort/django-utf8field.svg
:target: https://travis-ci.org/megasnort/django-utf8field/
:alt: Build status
This package was created because at my work at the `Language and Translation Technology Team`_ of the University of Ghent, we often build web demos that let users submit and process text or files. These texts are then passed on to other scripts that expect clean UTF-8 text.
This library extends Django's FileField, CharField and TextField by checking whether the content of a submitted file or text is clean; if not, it raises a validation error. Checks are performed for four-byte characters and NULL characters.
Requirements
------------
Django >= 1.8
Installation
------------
::
pip install django-utf8field
Usage
-----
Add the app to your settings:
::
    INSTALLED_APPS = (
        ...
        'utf8field',
        ...
    )
FileField
^^^^^^^^^
Create a model like you would do normally, but instead of using FileField you use UTF8FileField:
::
    from django.db import models
    from utf8field.fields import UTF8FileField
    class YourModel(models.Model):
        title = models.CharField(max_length=255)
        created_on = models.DateTimeField(auto_now_add=True)
        text = UTF8FileField()
You can also pass the `max_content_length` option to limit the number of characters in the file; if the content is longer, an error is displayed. To enable four-byte detection, set the `four_byte_detection` parameter to True.
::
    text = UTF8FileField(max_content_length=1000, four_byte_detection=True)
CharField
^^^^^^^^^
Create a model like you would do normally, but instead of using CharField you use UTF8CharField. If you want to enable `four_byte_detection` set the parameter to True.
::
    from django.db import models
    from utf8field.fields import UTF8CharField
    class YourModel(models.Model):
        title = models.CharField(max_length=255)
        created_on = models.DateTimeField(auto_now_add=True)
        text = UTF8CharField(max_length=1000, four_byte_detection=True)
TextField
^^^^^^^^^
Create a model like you would do normally, but instead of using TextField you use UTF8TextField. If you want to enable `four_byte_detection` set the parameter to True.
::
    from django.db import models
    from utf8field.fields import UTF8TextField
    class YourModel(models.Model):
        title = models.CharField(max_length=255)
        created_on = models.DateTimeField(auto_now_add=True)
        text = UTF8TextField(four_byte_detection=True)
Django Rest Framework
^^^^^^^^^^^^^^^^^^^^^
The necessary serializers and automatic mapping of fields are provided, so you do not need to do anything yourself to get texts or files validated when using a ModelSerializer.
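A minimal sketch (the model name follows the examples above; no extra validation code is required on the serializer):
::
    from rest_framework import serializers
    from .models import YourModel
    class YourModelSerializer(serializers.ModelSerializer):
        class Meta:
            model = YourModel
            fields = '__all__'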
Development
-----------
To run the tests make sure Django, Django Rest Framework and coverage are installed (`pip install django djangorestframework coverage`) and execute
::
python manage.py test
To create extra translations, execute
::
    python manage.py makemessages --locale=nl --extension=py --ignore=dev_example --ignore=build
... and modify the resulting `django.po` file in `utf8field/locale/nl/LC_MESSAGES`.
.. _`Language and Translation Technology Team`: https://lt3.ugent.be
|
PypiClean
|
/variable_generators-0.1.tar.gz/variable_generators-0.1/variable_generators/generators.py
|
from __future__ import print_function
import numpy as np
import pandas as pd
import orca
from urbansim.utils import misc
from urbansim.models import util
try:
import pandana
except ImportError:
pass
def make_agg_var(agent, geog, geog_id, var_to_aggregate, agg_function, how_fillna=None):
"""
Generator function for aggregation variables. Registers with orca.
"""
var_name = agg_function + '_' + var_to_aggregate
@orca.column(geog, var_name, cache=True, cache_scope='iteration')
def func():
agents = orca.get_table(agent)
print('Calculating {} of {} for {}'
.format(var_name, agent, geog))
groupby = agents[var_to_aggregate].groupby(agents[geog_id])
if agg_function == 'mean':
values = groupby.mean().fillna(0)
if agg_function == 'median':
values = groupby.median().fillna(0)
if agg_function == 'std':
values = groupby.std().fillna(0)
if agg_function == 'sum':
values = groupby.sum().fillna(0)
if agg_function == 'max':
values = groupby.max().fillna(0)
if agg_function == 'min':
values = groupby.min().fillna(0)
locations_index = orca.get_table(geog).index
series = pd.Series(data=values, index=locations_index)
# Fillna.
# For certain functions, must add other options,
# like puma value or neighboring value
if how_fillna is not None:
series = how_fillna(series)
else:
if agg_function == 'sum':
series = series.fillna(0)
else:
series = series.fillna(method='ffill')
series = series.fillna(method='bfill')
return series
return func
def make_disagg_var(from_geog_name, to_geog_name, var_to_disaggregate,
from_geog_id_name, name_based_on_geography=True):
"""
Generator function for disaggregating variables. Registers with orca.
"""
if name_based_on_geography:
var_name = from_geog_name + '_' + var_to_disaggregate
else:
var_name = var_to_disaggregate
@orca.column(to_geog_name, var_name, cache=True, cache_scope='iteration')
def func():
print('Disaggregating {} to {} from {}'
.format(var_to_disaggregate, to_geog_name, from_geog_name))
from_geog = orca.get_table(from_geog_name)
to_geog = orca.get_table(to_geog_name)
return misc.reindex(from_geog[var_to_disaggregate],
to_geog[from_geog_id_name]).fillna(0)
return func
def make_size_var(agent, geog, geog_id, cache=True, cache_scope='step', prefix_agent='total'):
"""
Generator function for size variables. Registers with orca.
"""
var_name = prefix_agent + '_' + agent
@orca.column(geog, var_name, cache=cache, cache_scope=cache_scope)
def func():
agents = orca.get_table(agent)
print('Calculating number of {} for {}'.format(agent, geog))
size = agents[geog_id].value_counts()
locations_index = orca.get_table(geog).index
series = pd.Series(data=size, index=locations_index)
series = series.fillna(0)
return series
return func
def make_proportion_var(agent, geog, geog_id, target_variable, target_value, prefix_agent='total'):
"""
Generator function for proportion variables. Registers with orca.
"""
try:
var_name = 'prop_%s_%s' % (target_variable, int(target_value))
except Exception:
var_name = 'prop_%s_%s' % (target_variable, target_value)
@orca.column(geog, var_name, cache=True, cache_scope='iteration')
def func():
agents = orca.get_table(agent).to_frame(
columns=[target_variable, geog_id])
locations = orca.get_table(geog)
print('Calculating proportion {} {} for {}'
.format(target_variable, target_value, geog))
agent_subset = agents[agents[target_variable] == target_value]
series = (agent_subset.groupby(geog_id).size()
* 1.0
/ locations[prefix_agent + '_' + agent])
series = series.fillna(0)
return series
return func
def make_dummy_variable(agent, geog_var, geog_id):
"""
Generator function for spatial dummy. Registers with orca.
"""
# cache_scope
try:
var_name = geog_var + '_is_' + str(geog_id)
except Exception:
var_name = geog_var + '_is_' + str(int(geog_id))
@orca.column(agent, var_name, cache=True, cache_scope='iteration')
def func():
agents = orca.get_table(agent)
return (agents[geog_var] == geog_id).astype('int32')
return func
def make_ratio_var(agent1, agent2, geog, prefix1='total', prefix2='total'):
"""
Generator function for ratio variables. Registers with orca.
"""
var_name = 'ratio_%s_to_%s' % (agent1, agent2)
@orca.column(geog, var_name, cache=True, cache_scope='iteration')
def func():
locations = orca.get_table(geog)
print('Calculating ratio of {} to {} for {}'
.format(agent1, agent2, geog))
series = (locations[prefix1 + '_' + agent1]
* 1.0
/ (locations[prefix2 + '_' + agent2] + 1.0))
series = series.fillna(0)
return series
return func
def make_density_var(agent, geog, prefix_agent='total'):
"""
Generator function for density variables. Registers with orca.
"""
var_name = 'density_%s' % (agent)
@orca.column(geog, var_name, cache=True, cache_scope='iteration')
def func():
locations = orca.get_table(geog)
print('Calculating density of {} for {}'.format(agent, geog))
series = locations[prefix_agent + '_' + agent] * 1.0 / (
locations['sum_acres'] + 1.0)
series = series.fillna(0)
return series
return func
def make_access_var(name, agent, target_variable=False, target_value=False,
radius=1000, agg_function='sum', decay='flat', log=True,
filters=False):
"""
Generator function for accessibility variables. Registers with orca.
"""
@orca.column('nodes', name, cache=True, cache_scope='iteration')
def func(net):
print('Calculating {}'.format(name))
nodes = pd.DataFrame(index=net.node_ids)
flds = [target_variable] if target_variable else []
if target_value:
flds += util.columns_in_filters(
["{} == {}".format(target_variable, target_value)])
if filters:
flds += util.columns_in_filters(filters)
flds.append('node_id')
df = orca.get_table(agent).to_frame(flds)
if target_value:
df = util.apply_filter_query(df, [
"{} == {}".format(target_variable, target_value)])
if filters:
df = util.apply_filter_query(df, filters)
net.set(df['node_id'],
variable=df[target_variable] if target_variable else None)
nodes[name] = net.aggregate(radius, type=agg_function, decay=decay)
if log:
            nodes[name] = np.log1p(nodes[name])
return nodes[name]
return func
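# Minimal usage sketch (the table and column names are illustrative): assuming orca
# tables 'households' and 'zones' are registered and households carry 'zone_id' and
# 'income' columns, this registers a 'sum_income' column on zones:
#
#     make_agg_var('households', 'zones', 'zone_id', 'income', 'sum')
#     orca.get_table('zones')['sum_income']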
|
PypiClean
|
/pypifuhd-2.0.0.tar.gz/pypifuhd-2.0.0/py_pifuhd/lib/model/ConvFilters.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models.resnet as resnet
import torchvision.models.vgg as vgg
class MultiConv(nn.Module):
def __init__(self, filter_channels):
super(MultiConv, self).__init__()
self.filters = []
for l in range(0, len(filter_channels) - 1):
self.filters.append(
nn.Conv2d(filter_channels[l], filter_channels[l + 1], kernel_size=4, stride=2))
self.add_module("conv%d" % l, self.filters[l])
def forward(self, image):
'''
:param image: [BxC_inxHxW] tensor of input image
:return: list of [BxC_outxHxW] tensors of output features
'''
y = image
# y = F.relu(self.bn0(self.conv0(y)), True)
feat_pyramid = [y]
for i, f in enumerate(self.filters):
y = f(y)
if i != len(self.filters) - 1:
y = F.leaky_relu(y)
# y = F.max_pool2d(y, kernel_size=2, stride=2)
feat_pyramid.append(y)
return feat_pyramid
class Vgg16(torch.nn.Module):
def __init__(self):
super(Vgg16, self).__init__()
vgg_pretrained_features = vgg.vgg16(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(4):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(4, 9):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(9, 16):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(16, 23):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(23, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
def forward(self, X):
h = self.slice1(X)
h_relu1_2 = h
h = self.slice2(h)
h_relu2_2 = h
h = self.slice3(h)
h_relu3_3 = h
h = self.slice4(h)
h_relu4_3 = h
h = self.slice5(h)
h_relu5_3 = h
return [h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3]
class ResNet(nn.Module):
def __init__(self, model='resnet18'):
super(ResNet, self).__init__()
if model == 'resnet18':
net = resnet.resnet18(pretrained=True)
elif model == 'resnet34':
net = resnet.resnet34(pretrained=True)
elif model == 'resnet50':
net = resnet.resnet50(pretrained=True)
else:
            raise ValueError('Unknown ResNet model: {}'.format(model))
self.conv1 = net.conv1
self.pool = net.maxpool
self.layer0 = nn.Sequential(net.conv1, net.bn1, net.relu)
self.layer1 = net.layer1
self.layer2 = net.layer2
self.layer3 = net.layer3
self.layer4 = net.layer4
def forward(self, image):
'''
:param image: [BxC_inxHxW] tensor of input image
:return: list of [BxC_outxHxW] tensors of output features
'''
y = image
feat_pyramid = []
y = self.layer0(y)
feat_pyramid.append(y)
y = self.layer1(self.pool(y))
feat_pyramid.append(y)
y = self.layer2(y)
feat_pyramid.append(y)
y = self.layer3(y)
feat_pyramid.append(y)
y = self.layer4(y)
feat_pyramid.append(y)
return feat_pyramid
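# Minimal usage sketch (input size is illustrative; instantiating ResNet downloads
# pretrained torchvision weights on first use):
#
#     net = ResNet('resnet18')
#     feats = net(torch.zeros(1, 3, 512, 512))
#     [f.shape for f in feats]   # five feature maps at decreasing resolution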
|
PypiClean
|
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_tzm-latn.js
|
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"Zdat azal",
"\u1e0ceffir aza"
],
"DAY": [
"Asamas",
"Aynas",
"Asinas",
"Akras",
"Akwas",
"Asimwas",
"Asi\u1e0dyas"
],
"MONTH": [
"Yennayer",
"Yebrayer",
"Mars",
"Ibrir",
"Mayyu",
"Yunyu",
"Yulyuz",
"\u0194uct",
"Cutanbir",
"K\u1e6duber",
"Nwanbir",
"Dujanbir"
],
"SHORTDAY": [
"Asa",
"Ayn",
"Asn",
"Akr",
"Akw",
"Asm",
"As\u1e0d"
],
"SHORTMONTH": [
"Yen",
"Yeb",
"Mar",
"Ibr",
"May",
"Yun",
"Yul",
"\u0194uc",
"Cut",
"K\u1e6du",
"Nwa",
"Duj"
],
"fullDate": "EEEE, d MMMM y",
"longDate": "d MMMM y",
"medium": "d MMM y h:mm:ss a",
"mediumDate": "d MMM y",
"mediumTime": "h:mm:ss a",
"short": "dd/MM/y h:mm a",
"shortDate": "dd/MM/y",
"shortTime": "h:mm a"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "\u20ac",
"DECIMAL_SEP": ",",
"GROUP_SEP": "\u00a0",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-",
"negSuf": "\u00a0\u00a4",
"posPre": "",
"posSuf": "\u00a0\u00a4"
}
]
},
"id": "tzm-latn",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
|
PypiClean
|
/expertai_extract-0.1.2-py3-none-any.whl/expertai/extract/openapi/client/api/default_api.py
|
import re # noqa: F401
import sys # noqa: F401
from expertai.extract.openapi.client.api_client import ApiClient, Endpoint as _Endpoint
from expertai.extract.openapi.client.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from expertai.extract.openapi.client.model.layout_document_async_response import LayoutDocumentAsyncResponse
from expertai.extract.openapi.client.model.layout_request import LayoutRequest
from expertai.extract.openapi.client.model.recognition_task_output import RecognitionTaskOutput
class DefaultApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.layout_document_async_post_endpoint = _Endpoint(
settings={
'response_type': (LayoutDocumentAsyncResponse,),
'auth': [
'bearerAuth'
],
'endpoint_path': '/layout-document-async',
'operation_id': 'layout_document_async_post',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'layout_request',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'layout_request':
(LayoutRequest,),
},
'attribute_map': {
},
'location_map': {
'layout_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.status_task_id_get_endpoint = _Endpoint(
settings={
'response_type': (RecognitionTaskOutput,),
'auth': [
'bearerAuth'
],
'endpoint_path': '/status/{task-id}',
'operation_id': 'status_task_id_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'task_id',
],
'required': [
'task_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'task_id':
(str,),
},
'attribute_map': {
'task_id': 'task-id',
},
'location_map': {
'task_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
def layout_document_async_post(
self,
**kwargs
):
"""layout_document_async_post # noqa: E501
Asynchronous layout recognition. The response body is a JSON object containing the ID of the recognition task. The task ID must be used in the URL of the `status` resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.layout_document_async_post(async_req=True)
>>> result = thread.get()
Keyword Args:
layout_request (LayoutRequest): The document to be analyzed. [optional]
_return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
LayoutDocumentAsyncResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
return self.layout_document_async_post_endpoint.call_with_http_info(**kwargs)
def status_task_id_get(
self,
task_id,
**kwargs
):
"""status_task_id_get # noqa: E501
Returns the status of the layout recognition task or, if the task is finished, the results of the task. The task ID is that returned by the `layout-document-async` resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.status_task_id_get(task_id, async_req=True)
>>> result = thread.get()
Args:
task_id (str): Recognition task ID
Keyword Args:
_return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
RecognitionTaskOutput
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['task_id'] = \
task_id
return self.status_task_id_get_endpoint.call_with_http_info(**kwargs)
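# Illustrative call pattern only (not part of the generated client). It assumes
# authentication has already been configured on the ApiClient, and the
# `task_id` attribute name on the async response is an assumption taken from
# the LayoutDocumentAsyncResponse model:
#
#     api = DefaultApi(ApiClient())
#     response = api.layout_document_async_post(layout_request=LayoutRequest(...))
#     # Poll the recognition task until it reports a final status.
#     status = api.status_task_id_get(task_id=response.task_id)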
|
PypiClean
|
/beaker-common-28.3.tar.gz/beaker-common-28.3/bkr/common/pyconfig.py
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# Copy and pasted from kobo.conf, from kobo 0.4.2-1
"""
Python-like syntax config parser.
USAGE:
settings = PyConfigParser()
settings.load_from_{conf, dict, file, string}(...)
print(settings[var])
CONFIG FILE SYNTAX:
PyConfigParser accepts following python-like syntax:
- variable = <str, int, float, dict, list, tuple>
- variable = <other_variable>
- formatting strings can be used:
- variable = "%s %s" % (var1, var2)
- variable = "%(key1)s %(key2)s" % <dict>
- imports are supported:
- from <file_without_suffix> import *
- from <file_without_suffix> import var1, var2
"""
from __future__ import print_function
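# Illustrative example of the syntax described in the module docstring
# (all names are made up):
#
#     host = "example.org"
#     port = 8080
#     url = "%s:%s" % (host, port)
#     options = {"retries": 3, "backoff": [1, 2, 4]}
#
# Such a file could be loaded with:
#
#     settings = PyConfigParser()
#     settings.load_from_file("project.conf")
#     print(settings["url"])  # example.org:8080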
import os
import fnmatch
import itertools
import keyword
import token
import tokenize
import six
from six.moves import StringIO
class ImproperlyConfigured(Exception):
"""
Program is improperly configured.
"""
pass
def get_dict_value(dictionary, key):
"""
Return a value from a dictionary, if not found,
use 'glob' keys (*, ? metachars), then use default value with '*' key.
"""
if dictionary is None:
return None
if type(dictionary) is not dict:
raise TypeError("Dictionary expected, got %s." % type(dictionary))
try:
return dictionary[key]
except KeyError:
if isinstance(key, str):
matches = []
for pattern in six.iterkeys(dictionary):
if pattern == '*' or not isinstance(pattern, str):
# exclude '*', because it would match every time
continue
if fnmatch.fnmatchcase(key, pattern):
matches.append(pattern)
if len(matches) == 1:
return dictionary[matches[0]]
elif len(matches) > 1:
raise KeyError("Key matches multiple values: %s" % key)
if '*' in dictionary:
return dictionary['*']
raise
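# For example, given the lookup rules above:
#
#     get_dict_value({"foo*": 1, "*": 2}, "foobar")  # -> 1 (glob match)
#     get_dict_value({"foo*": 1, "*": 2}, "baz")     # -> 2 ('*' fallback)
#     get_dict_value({"foo*": 1}, 42)                # raises KeyError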
class PyConfigParser(dict):
"""
Python-like syntax config parser.
"""
get_dict_value = staticmethod(get_dict_value)
def __init__(self, config_file_suffix="conf", debug=False):
self._tok_number = None
self._tok_value = None
self._tok_begin = None
self._tok_end = None
self._tok_line = None
self._tok_name = None
self._config_file_suffix = config_file_suffix
self._debug = debug
self._open_file = None
self._tokens = None
def __getitem__(self, name):
if name.startswith("_"):
raise KeyError(name)
return dict.__getitem__(self, name)
def __setitem__(self, name, value):
if name.startswith("_"):
raise KeyError(name)
return dict.__setitem__(self, name, value)
def load_from_file(self, file_name):
"""
Load data from a file.
"""
fo = open(file_name, "r")
data = fo.read()
fo.close()
self._open_file = file_name
self.load_from_string(data)
def load_from_string(self, input_string):
"""
Load data from a string.
"""
if input_string:
self._tokens = tokenize.generate_tokens(StringIO(input_string).readline)
for key, value in self._parse():
self[key] = value
def load_from_dict(self, input_dict):
"""
Load data from a dictionary.
"""
if input_dict is not None:
self.update(input_dict)
def load_from_conf(self, conf):
"""
Load data from another config.
"""
self.load_from_dict(conf)
def _parse(self):
"""
Parse config file and store results to this object.
"""
while True:
self._get_token()
if self._tok_value == "from":
self._get_from_import()
continue
if keyword.iskeyword(self._tok_value):
raise SyntaxError("Cannot assign to a python keyword: %s" % self._tok_value)
if self._tok_name == "ENDMARKER":
break
self._assert_token(("NAME", ))
key = self._tok_value
self._get_token()
self._assert_token(("OP", "="))
value = self._get_value()
yield key, value
def _assert_token(self, *args):
"""
Check if token has proper name and value.
*args are tuples (name, value), (name, )
"""
for i in args:
if len(i) == 1 and i == (self._tok_name, ):
return
if len(i) == 2 and i == (self._tok_name, self._tok_value):
return
raise SyntaxError("Invalid syntax: file: %s, begin: %s, end: %s, text: %s" % (self._open_file, self._tok_begin, self._tok_end, self._tok_line))
def _get_token(self, skip_newline=True):
"""
Get a new token from token generator.
"""
self._tok_number, self._tok_value, self._tok_begin, self._tok_end, self._tok_line = next(self._tokens)
self._tok_name = token.tok_name.get(self._tok_number, None)
if self._debug:
print("%2s %16s %s" % (self._tok_number, self._tok_name, self._tok_value.strip()))
# skip some tokens
if self._tok_name in ["COMMENT", "INDENT", "DEDENT"]:
self._get_token()
if skip_newline and self._tok_name in ["NL", "NEWLINE"]:
self._get_token()
def _get_NAME(self):
"""
Return a NAME token value.
"""
if self._tok_value == "False":
return False
if self._tok_value == "True":
return True
if self._tok_value == "None":
return None
# return already defined variable
return self[self._tok_value]
def _get_STRING(self):
"""
Return a STRING token value.
"""
# remove apostrophes or quotation marks
result = self._tok_value[1:-1]
# look at next token if "%s" follows the string
self._tokens, tmp = itertools.tee(self._tokens)
if next(tmp)[1:2] != ("%", ):
# just a regular string
return result
# string formatting is used
self._get_token()
self._assert_token(("OP", "%"))
values = self._get_value()
return result % values
def _get_NUMBER(self, negative=False):
"""
Return a NUMBER token value.
"""
if self._tok_value.find(".") != -1:
result = float(self._tok_value)
else:
result = int(self._tok_value)
if negative:
return -result
return result
def _get_value(self, get_next=True, basic_types_only=False):
"""
Get a value (number, string, other variable value, ...).
"""
if get_next:
self._get_token()
self._assert_token(("NAME", ), ("NUMBER", ), ("STRING", ), ("OP", "{"), ("OP", "["), ("OP", "("), ("OP", "-"))
if (self._tok_name, self._tok_value) == ("OP", "-"):
self._get_token()
self._assert_token(("NUMBER", ))
return self._get_NUMBER(negative=True)
if self._tok_name in ["NAME", "NUMBER", "STRING"]:
return getattr(self, "_get_%s" % self._tok_name)()
if not basic_types_only:
if (self._tok_name, self._tok_value) == ("OP", "{"):
return self._get_dict()
if (self._tok_name, self._tok_value) == ("OP", "["):
return self._get_list()
if (self._tok_name, self._tok_value) == ("OP", "("):
return self._get_tuple()
self._assert_token(("FOO", ))
def _get_from_import(self):
"""
Parse 'from <config> import <variables/*>'
and import <config> data to this object.
"""
file_name = ""
while True:
self._get_token()
if (self._tok_name, self._tok_value) == ("NAME", "import"):
break
file_name += str(self._tok_value)
file_name += "." + self._config_file_suffix
file_name = os.path.join(os.path.dirname(self._open_file), file_name)
self._assert_token(("NAME", "import"))
imports = []
self._get_token()
while self._tok_name not in ("NL", "NEWLINE"):
self._assert_token(("NAME", ), ("OP", "*"))
imports.append(self._tok_value)
self._get_token(skip_newline=False)
self._skip_commas(skip_newline=False)
imported_config = self.__class__(config_file_suffix=self._config_file_suffix, debug=self._debug)
imported_config.load_from_file(file_name)
if "*" in imports:
self.load_from_dict(imported_config)
else:
for key in imports:
try:
self[key] = imported_config[key]
except KeyError:
raise KeyError("Can't import %s from %s." % (key, file_name))
def _skip_commas(self, skip_newline=True):
"""
Skip OP tokens which contain commas.
"""
while (self._tok_name, self._tok_value) == ("OP", ","):
self._get_token(skip_newline)
def _get_dict(self):
"""
Get a dictionary content.
"""
result = {}
while True:
self._get_token()
self._skip_commas()
if (self._tok_name, self._tok_value) == ("OP", "}"):
break
key = self._get_value(get_next=False, basic_types_only=True)
self._get_token()
self._assert_token(("OP", ":"))
value = self._get_value()
result[key] = value
return result
def _get_list(self):
"""
Get a list content.
"""
result = []
while True:
self._get_token()
self._skip_commas()
if (self._tok_name, self._tok_value) == ("OP", "]"):
break
value = self._get_value(get_next=False)
result.append(value)
return result
def _get_tuple(self):
"""
Get a tuple content.
"""
result = []
while True:
self._get_token()
self._skip_commas()
if (self._tok_name, self._tok_value) == ("OP", ")"):
break
value = self._get_value(get_next=False)
result.append(value)
return tuple(result)
# settings is created only if PROJECT_CONFIG_FILE is set
if "PROJECT_CONFIG_FILE" in os.environ:
try:
settings = PyConfigParser()
settings.load_from_file(os.environ.get("PROJECT_CONFIG_FILE"))
except Exception as ex:
raise ImproperlyConfigured("Could not load config file: %s" % ex)
|
PypiClean
|
/django_template_forms-0.3.1-py3-none-any.whl/template_forms/base.py
|
from django.forms.forms import BaseForm
from django.utils.encoding import force_text
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from .utils import add_css_classes, try_classmro
class TemplateForm(BaseForm):
"""
Generic mixin that implements the template rendering logic for the fields and the form.
This mixin should precede `forms.Form` or `forms.ModelForm` to ensure that the correct
rendering function is called by default.
"""
form_template_name = None
# `field_template_map` should be a mapping of *widgets* to field template names. This
# may seem counterintuitive, but it is necessary as we're rendering the markup around
# the field's underlying widget, and this widget may be customized per-field.
field_template_map = None
outer_css_class = None
label_css_class = None
field_css_class = None
# Additional forms.Form attributes:
# - error_css_class
# - required_css_class
def format_hidden_error(self, name, error):
return _('(Hidden field %(name)s) %(error)s') % {
'name': name,
'error': force_text(error)
}
def get_form_template_name(self):
return self.form_template_name
def get_field_template_name(self, boundfield):
assert self.field_template_map is not None, \
'`{}.field_template_map` should be a mapping of widgets to template names.' \
.format(type(self).__name__)
return try_classmro(
self.field_template_map.get,
type(boundfield.field.widget)
)
def get_form_context(self):
# Errors that should be displayed above all fields.
top_errors = self.non_field_errors()
output, hidden_output = [], []
for name, field in self.fields.items():
bf = self[name]
# Escape and cache in local variable.
bf_errors = self.error_class([conditional_escape(error) for error in bf.errors])
if not bf.is_hidden:
output.append(self.render_field(bf, bf_errors))
else:
hidden_output.append(str(bf))
top_errors += [self.format_hidden_error(name, e) for e in bf_errors]
return {
'top_errors': top_errors,
'hidden_output': hidden_output,
'output': output,
'form': self,
}
def get_field_context(self, bf, errors):
field_css_classes = self.get_field_css_classes(bf, errors)
label_css_classes = self.get_label_css_classes(bf, errors)
# add the field_css_class to the widget attrs
if field_css_classes:
add_css_classes(bf, ' '.join(field_css_classes))
# build the set of css classes for the outer html
# element that wraps the inner label/field.
outer_classes = bf.css_classes(self.outer_css_class)
# don't render label tag if label=''
label = bf.label_tag(
conditional_escape(force_text(bf.label)),
attrs={'class': ' '.join(label_css_classes)},
) if bf.label else ''
return {
'outer_classes': outer_classes,
'label': force_text(label),
'field': str(bf),
'help_text': mark_safe(bf.help_text),
'errors': errors,
'bf': bf,
}
def get_field_css_classes(self, bf, errors):
classes = []
if self.field_css_class:
classes.append(self.field_css_class)
# boundfield uses a hasattr check instead of 'is not None'
if errors and hasattr(self, 'error_css_class'):
classes.append(self.error_css_class)
return classes
def get_label_css_classes(self, bf, errors):
classes = []
if self.label_css_class:
classes.append(self.label_css_class)
return classes
def render_field(self, bf, errors):
template_name = self.get_field_template_name(bf)
context = self.get_field_context(bf, errors)
return mark_safe(self.renderer.render(template_name, context))
def render(self):
template_name = self.get_form_template_name()
context = self.get_form_context()
return mark_safe(self.renderer.render(template_name, context))
def __str__(self):
return self.render()
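# A minimal sketch of a concrete form using this mixin (template paths, CSS
# classes and field names below are placeholders, not part of this package):
#
#     from django import forms
#
#     class ContactForm(TemplateForm, forms.Form):
#         form_template_name = 'myapp/forms/form.html'
#         field_template_map = {
#             forms.Widget: 'myapp/forms/field.html',           # fallback for any widget
#             forms.CheckboxInput: 'myapp/forms/checkbox.html',
#         }
#         outer_css_class = 'form-group'
#         label_css_class = 'control-label'
#         field_css_class = 'form-control'
#
#         name = forms.CharField()
#         subscribe = forms.BooleanField(required=False)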
|
PypiClean
|
/giji-rtwo-0.1.1.tar.gz/giji-rtwo-0.1.1/giji_rtwo/drivers/openstack_network.py
|
import os
import netaddr
from threepio import logger
from giji_rtwo.drivers.common import _connect_to_heat, _connect_to_sahara, _connect_to_neutron, _connect_to_keystone_v3
from neutronclient.common.exceptions import NeutronClientException, NotFound
ROUTER_INTERFACE_NAMESPACE = (
'network:router_interface',
'network:router_interface_distributed',
'network:ha_router_replicated_interface'
)
class NetworkManager(object):
neutron = None
default_router = None
def __init__(self, *args, **kwargs):
self.default_router = kwargs.pop("router_name", None)
self.neutron, self.sahara, self.heat = self.new_connection(*args, **kwargs)
def new_connection(self, *args, **kwargs):
"""
Allows us to make another connection (As the user)
"""
#NOTE: This is a HACK that should be removed when we stop supporting "Legacy Openstack"
if 'auth_url' in kwargs and '/v2' in kwargs['auth_url']:
neutron = _connect_to_neutron(*args, **kwargs)
sahara = None
heat = None
elif 'session' not in kwargs:
if 'project_name' not in kwargs and 'tenant_name' in kwargs:
kwargs['project_name'] = kwargs['tenant_name']
(auth, session, token) = _connect_to_keystone_v3(**kwargs)
neutron = _connect_to_neutron(session=session)
sahara = _connect_to_sahara(session=session)
heat = _connect_to_heat(session=session)
else:
neutron = _connect_to_neutron(*args, **kwargs)
sahara = _connect_to_sahara(*args, **kwargs)
heat = _connect_to_heat(*args, **kwargs)
return neutron, sahara, heat
def tenant_networks(self, tenant_id=None):
if not tenant_id:
tenant_id = self.get_tenant_id()
tenant_nets = self.list_networks(tenant_id=tenant_id)
return tenant_nets
def get_tenant_id(self):
credentials = self.get_credentials()
try:
tenant_id = credentials.get('auth_tenant_id')
return tenant_id
except KeyError:
logger.warn(
"Key 'auth_tenant_id' no longer exists in "
"'get_credentials()'")
return None
def get_credentials(self):
"""
Return the user_id and tenant_id of the network manager
"""
auth_info = self.neutron.httpclient.get_auth_info()
if not auth_info.get('auth_tenant_id'):
self.list_networks()
auth_info = self.neutron.httpclient.get_auth_info()
auth_info.pop('auth_token')
return auth_info
##Admin-specific methods##
def project_network_map(self):
named_networks = self.find_network('-net', contains=True)
users_with_networks = [net['name'].replace('-net', '')
for net in named_networks]
user_map = {}
networks = self.list_networks()
subnets = self.list_subnets()
routers = self.list_routers()
ports = self.list_ports()
for user in users_with_networks:
my_nets = [net for net in networks if '%s-net' % user in net['name']]
net_ids = [n['id'] for n in my_nets]
my_subnets = [subnet for subnet in subnets if '%s-subnet' % user in subnet['name']]
subnet_ids = [s['id'] for s in my_subnets]
my_ports = []
for port in ports:
if 'dhcp' in port['device_owner'] or \
'compute:None' in port['device_owner']:
#Skip these ports..
continue
if port['network_id'] in net_ids:
my_ports.append(port)
continue
fixed_ips = port['fixed_ips']
for fixed_ip in fixed_ips:
if fixed_ip['subnet_id'] in subnet_ids:
my_ports.append(port)
break
#TODO: Can you have more than one of these?
if len(my_nets) == 1:
my_nets = my_nets[0]
if len(my_subnets) == 1:
my_subnets = my_subnets[0]
user_map[user] = {'network': my_nets,
'subnet': my_subnets,
'public_interface': my_ports}
logger.debug("Added user %s" % user_map[user])
return user_map
def get_user_neutron(self, username, password,
project_name, auth_url, region_name):
user_creds = {
'username': username,
'password': password,
'tenant_name': project_name,
'auth_url': auth_url,
'region_name': region_name
}
user_neutron = self.new_connection(**user_creds)
return user_neutron
def disassociate_floating_ip(self, server_id):
"""
Remove floating IP from the server <server_id>
* NO return value
* raises NeutronClientException if delete fails
"""
floating_ip_id = None
for f_ip in self.list_floating_ips():
if f_ip.get('instance_id') == server_id:
floating_ip_id = f_ip['id']
#No floating ip matches - Disassociate has nothing to do
if not floating_ip_id:
return
#Remove floating ip
deleted_ip = self.neutron.delete_floatingip(floating_ip_id)
return
def associate_floating_ip(self, server_id):
"""
Create a floating IP on the external network
Find port of new VM
Associate new floating IP with the port assigned to the new VM
"""
networks = self.list_networks()
external_networks = [net for net in networks
if net['router:external'] == True]
if not external_networks:
raise Exception("CONFIGURATION ERROR! No external networks found!"
" Cannot associate floating ip without it!"
" Create a fixed IP/port first!")
instance_ports = self.list_ports(device_id=server_id)
if not instance_ports:
raise Exception("No ports found with device_id == %s."
" Create a fixed IP/port first!" % server_id)
#TODO: Look at the network if it already has a floating ip, dont
#re-create
body = {'floatingip': {
'port_id': instance_ports[0]['id'],
'floating_network_id': external_networks[0]['id']
}}
new_ip = self.neutron.create_floatingip(body)['floatingip']
logger.info('Assigned Floating IP - %s:%s' % (server_id, new_ip))
return new_ip
def create_port(self, server_id, network_id, subnet_id=None,
ip_address=None, tenant_id=None, mac_address=None, name=None):
"""
Create a new (Fixed IP) Port between server id and the user network
"""
if not name:
name = 'fixed_ip_%s' % (server_id,)
port_data = {'port':
{
"tenant_id": tenant_id,
"network_id":network_id,
"device_id":server_id,
"fixed_ips": [{"subnet_id":subnet_id, "ip_address":
ip_address}],
'admin_state_up':True,
'name':name
}
}
if mac_address:
port_data['port']['mac_address'] = mac_address
if subnet_id and ip_address:
#In this case, we should attach the interface after the fact.
port_data['port'].pop('device_id')
port_obj = self.neutron.create_port(port_data)
return port_obj['port']
def find_server_ports(self, server_id):
"""
Find all the ports for a given server_id (device_id in port object).
"""
server_ports = []
all_ports = self.list_ports()
return [p for p in all_ports if p['device_id'] == server_id]
def list_floating_ips(self):
instance_ports = self.list_ports()
floating_ips = self.neutron.list_floatingips()['floatingips']
# Connect instances and floating_ips using ports.
for fip in floating_ips:
port = [p for p in instance_ports if p['id'] == fip['port_id']]
if port:
fip['instance_id'] = port[0]['device_id']
#logger.debug(floating_ips)
return floating_ips
def rename_security_group(self, project, security_group_name=None):
security_group_resp = self.neutron.list_security_groups(
tenant_id=project.id)
default_group_id = None
for sec_group in security_group_resp['security_groups']:
if 'default' in sec_group['name']:
default_group_id = sec_group['id']
break
if not default_group_id:
raise Exception("Could not find the security group named 'default'")
try:
if not security_group_name:
security_group_name = project.name
#FIXME: we don't actually name it?
sec_group = self.neutron.update_security_group(
default_group_id,
{"security_group": {"description": security_group_name}})
return sec_group
except NeutronClientException:
logger.exception("Problem updating description of 'default' "
"security group to %s" % project.name)
raise
##Libcloud-Neutron Interface##
@classmethod
def lc_driver_init(self, lc_driver, *args, **kwargs):
lc_driver_args = {
'username': lc_driver.key,
'password': lc_driver.secret,
'tenant_name': lc_driver._ex_tenant_name,
#Libcloud requires /v2.0/tokens -- OS clients do not.
'auth_url': lc_driver._ex_force_auth_url.replace('/tokens',''),
'region_name': lc_driver._ex_force_service_region}
lc_driver_args.update(kwargs)
manager = NetworkManager(*args, **lc_driver_args)
return manager
def lc_list_networks(self, *args, **kwargs):
"""
Call neutron list networks and convert to libcloud objects
"""
network_list = self.neutron.list_networks(*args, **kwargs)
return [self._to_lc_network(net) for net in network_list['networks']]
def _to_lc_network(self, net):
from libcloud.compute.drivers.openstack import OpenStackNetwork
return OpenStackNetwork(id=net['id'],
name=net['name'],
cidr=net.get('cidr', None),
extra=net,
driver=self)
##GET##
def get_network(self, network_id):
for net in self.neutron.list_networks()['networks']:
if network_id == net['id']:
return net
return None
def get_subnet(self, subnet_id):
for subnet in self.neutron.list_subnets()['subnets']:
if subnet_id == subnet['id']:
return subnet
return None
def get_port(self, port_id):
ports = self.list_ports()
if not ports:
return []
for port in ports:
if port['id'] == port_id:
return port
return None
##Easy Lists##
def list_networks(self, *args, **kwargs):
"""
NOTE: kwargs can be: tenant_id=, or any other attr listed in the
details of a network.
"""
return self.neutron.list_networks(*args, **kwargs)['networks']
def list_subnets(self):
return self.neutron.list_subnets()['subnets']
def list_routers(self):
return self.neutron.list_routers()['routers']
##LOOKUP##
def find_tenant_resources(self, tenant_id, instance_ids=[]):
networks = [net for net in self.list_networks()
if net['tenant_id'] == tenant_id]
ports = [port for port in self.list_ports()
if port['tenant_id'] == tenant_id
or port['device_id'] in instance_ids]
subnets = [subnet for subnet in self.list_subnets()
if subnet['tenant_id'] == tenant_id]
routers = [router for router in self.list_routers()
if router['tenant_id'] == tenant_id]
return {"ports": ports,
"networks": networks,
"subnets": subnets,
"routers": routers
}
def find_network(self, network_name, contains=False):
return [net for net in self.list_networks()
if network_name == net['name']
or (contains and network_name in net['name'])]
def find_subnet(self, subnet_name, contains=False):
return [net for net in self.list_subnets()
if subnet_name == net['name']
or (contains and subnet_name in net['name'])]
def find_router(self, router_name):
return [net for net in self.list_routers()
if router_name == net['name']]
def find_ports_for_router(self, router_name):
routers = self.find_router(router_name)
if not routers:
return []
router_id = routers[0]['id']
return [port for port in self.list_ports()
if router_id == port['device_id']]
def list_ports(self, **kwargs):
"""
Options:
subnet_id=subnet.id
device_id=device.id
ip_address=111.222.333.444
"""
return self.neutron.list_ports(**kwargs)['ports']
def find_router_interface(self, router, subnet):
#If no router/subnet, return None
if not router or not subnet:
return None
#If str router/subnet, find the obj
if type(router) != dict:
routers = self.find_router(router)
if not routers:
logger.info('Router %s does not exist' % router)
return None
router = routers[0]
if type(subnet) != dict:
subnets = self.find_subnet(subnet)
if not subnets:
logger.info('Subnet %s does not exist' % subnet)
return None
subnet = subnets[0]
#Return the router interfaces matching router+subnet
router_name = router['name']
subnet_id = subnet['id']
router_ports = self.find_ports_for_router(router_name)
router_interfaces = []
for port in router_ports:
if port['device_owner'] not in ROUTER_INTERFACE_NAMESPACE:
continue
subnet_match = False
for ip_subnet_obj in port['fixed_ips']:
if subnet_id in ip_subnet_obj['subnet_id']:
subnet_match = True
break
if subnet_match:
router_interfaces.append(port)
return router_interfaces
def find_router_gateway(self,
router_name,
external_network_name='ext_net'):
network_id = self.find_network(external_network_name)[0]['id']
routers = self.find_router(router_name)
if not routers:
return
return [r for r in routers if r.get('external_gateway_info') and
network_id in r['external_gateway_info'].get('network_id', '')]
##ADD##
def create_network(self, neutron, network_name):
existing_networks = self.find_network(network_name)
if existing_networks:
logger.info('Network %s already exists' % network_name)
return existing_networks[0]
network = {'name': network_name, 'admin_state_up': True}
network_obj = neutron.create_network({'network': network})
return network_obj['network']
def validate_cidr(self, cidr):
logger.info("Attempting to validate cidr %s" % cidr)
test_cidr_set = netaddr.IPSet([cidr])
all_subnets = [subnet for subnet in self.list_subnets()
if subnet.get('ip_version', 4) != 6]
all_subnet_ips = [sn['allocation_pools'] for sn in all_subnets]
for idx, subnet_ip_list in enumerate(all_subnet_ips):
for subnet_ip_range in subnet_ip_list:
(start, end) = (subnet_ip_range['start'], subnet_ip_range['end'])
if start.startswith('10') or end.startswith('10') or start.startswith('192') or end.startswith('192'):
continue
test_range = netaddr.IPRange(
subnet_ip_range['start'], subnet_ip_range['end'])
if len(test_range) > 1000:
continue
for ip in test_range:
if ip in test_cidr_set:
raise Exception("Overlap detected for CIDR %s and Subnet %s" % (cidr, all_subnets[idx]))
return True
def create_subnet(self, neutron, subnet_name,
network_id, ip_version=4, cidr=None,
dns_nameservers=[], subnet_pool_id=None):
existing_subnets = self.find_subnet(subnet_name)
if existing_subnets:
logger.info('Subnet %s already exists' % subnet_name)
return existing_subnets[0]
#self.validate_cidr(cidr)
subnet = {
'name': subnet_name,
'network_id': network_id,
'ip_version': ip_version,
}
if subnet_pool_id:
subnet['subnetpool_id'] = subnet_pool_id
else:
if not dns_nameservers:
dns_nameservers = ['8.8.8.8', '8.8.4.4']
subnet['dns_nameservers'] = dns_nameservers
subnet['cidr'] = cidr
logger.debug("Creating subnet - %s" % subnet)
subnet_obj = neutron.create_subnet({'subnet': subnet})
return subnet_obj['subnet']
def create_router(self, neutron, router_name):
existing_routers = self.find_router(router_name)
if existing_routers:
logger.info('Router %s already exists' % router_name)
return existing_routers[0]
router = {'name': router_name, 'admin_state_up': True}
router_obj = neutron.create_router({'router': router})
return router_obj['router']
def add_router_interface(self, router, subnet, interface_name=None):
existing_router_interfaces = self.find_router_interface(router, subnet)
if existing_router_interfaces:
logger.info('Router Interface for Subnet:%s-Router:%s already '
'exists' % (subnet['name'], router['name']))
return existing_router_interfaces[0]
body = {"subnet_id": subnet['id']}
interface_obj = self.neutron.add_interface_router(router['id'], body)
if interface_name:
self.neutron.update_port(
interface_obj['port_id'],
{"port":{"name":interface_name}})
return interface_obj
def set_router_gateway(self, neutron, router_name,
external_network_name='ext_net'):
"""
Must be run as admin
"""
existing_gateways = self.find_router_gateway(router_name,
external_network_name)
if existing_gateways:
logger.info('Router gateway for External Network:%s-Router:%s '
'already exists' % (external_network_name, router_name))
return existing_gateways[0]
#Establish the router_gateway
router_id = self.get_router_id(self.neutron, router_name)
external_network = self.get_network_id(neutron, external_network_name)
body = {'network_id': external_network}
return self.neutron.add_gateway_router(router_id, body)
## LOOKUPS##
def get_subnet_id(self, neutron, subnet_name):
sn_list = neutron.list_subnets(name=subnet_name)
if sn_list and sn_list.get('subnets'):
return sn_list['subnets'][0]['id']
def get_router_id(self, neutron, router_name):
rt_list = neutron.list_routers(name=router_name)
if rt_list and rt_list.get('routers'):
return rt_list['routers'][0]['id']
def get_network_id(self, neutron, network_name):
nw_list = neutron.list_networks(name=network_name)
if nw_list and nw_list.get('networks'):
return nw_list['networks'][0]['id']
##DELETE##
def remove_router_gateway(self, router_name):
router_id = self.get_router_id(self.neutron, router_name)
if router_id:
return self.neutron.remove_gateway_router(router_id)
def remove_router_interface(self, neutron, router_name, subnet_name):
router_id = self.get_router_id(self.neutron, router_name)
subnet_id = self.get_subnet_id(neutron, subnet_name)
#FIXME: Ensure no instances/IPs are using the interface
# && raise an error if they try!
if router_id and subnet_id:
try:
return neutron\
.remove_interface_router(router_id,
{"subnet_id": subnet_id})
except NeutronClientException as neutron_err:
if 'no interface on subnet' in str(neutron_err):
#Attempted to delete a connection that does not exist.
#Ignore this conflict.
return
logger.exception("Problem deleting interface router"
" from router %s to subnet %s."
% (router_id, subnet_id))
raise
def delete_router(self, neutron, router_name):
router_id = self.get_router_id(self.neutron, router_name)
if router_id:
try:
return neutron.delete_router(router_id)
except:
logger.error("Problem deleting router: %s" % router_id)
raise
def delete_subnet(self, neutron, subnet_name):
subnet_id = self.get_subnet_id(neutron, subnet_name)
if subnet_id:
try:
return neutron.delete_subnet(subnet_id)
except:
logger.error("Problem deleting subnet: %s" % subnet_id)
raise
def delete_network(self, neutron, network_name):
network_id = self.get_network_id(neutron, network_name)
if network_id:
try:
return neutron.delete_network(network_id)
except:
logger.error("Problem deleting network: %s" % network_id)
raise
def delete_port(self, port):
return self.neutron.delete_port(port['id'])
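# Illustrative usage sketch (not part of the module). The credential values are
# placeholders and assume a Keystone v3 endpoint; see new_connection() and
# get_user_neutron() above for the accepted keyword arguments:
#
#     manager = NetworkManager(
#         username='jdoe',
#         password='secret',
#         tenant_name='jdoe-project',
#         auth_url='https://keystone.example.org:5000/v3',
#         region_name='RegionOne',
#     )
#     net = manager.create_network(manager.neutron, 'jdoe-net')
#     subnet = manager.create_subnet(manager.neutron, 'jdoe-subnet',
#                                    net['id'], cidr='10.0.0.0/24')
#     router = manager.create_router(manager.neutron, 'jdoe-router')
#     manager.add_router_interface(router, subnet)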
|
PypiClean
|
/django-outlook-0.1.1.tar.gz/django-outlook-0.1.1/django_outlook/views.py
|
from django.views.generic import TemplateView, RedirectView
from django_outlook.o365_utils.adv_connection import OutlookConnection
from django_outlook.o365_utils.login_token_storage import LoginTokenStorage
from django_outlook.o365_utils.token_storage import TokenStorage
from djangoautoconf.django_utils import retrieve_param
from djangoautoconf.local_key_manager import get_local_key
class O365AuthRedirectView(RedirectView):
permanent = False # Not always redirect to the same page
def get_redirect_url(self, *args, **kwargs):
# article = get_object_or_404(Article, pk=kwargs['pk'])
# article.update_counter()
# return super().get_redirect_url(*args, **kwargs)
token_storage = TokenStorage(self.request.user)
c = OutlookConnection(
get_local_key("o365_app_settings.o365_app_client_id"),
get_local_key("o365_app_settings.o365_app_secret"),
token_storage,
)
auth_url = c.get_auth_url()
return auth_url
class OutlookAuthResultView(TemplateView):
template_name = 'django_outlook/key_got.html'
def get_context_data(self, **kwargs):
# return super(OutlookAuthResultView, self).get_context_data(**kwargs)
# param = retrieve_param(self.request)
token_storage = TokenStorage(self.request.user)
c = OutlookConnection(
get_local_key("o365_app_settings.o365_app_client_id"),
get_local_key("o365_app_settings.o365_app_secret"),
token_storage,
)
token_url = "%s/?%s" % ("https://localhost", self.request.META['QUERY_STRING'])
res = c.update_extra_data(token_url)
return res
class O365LoginRedirectView(RedirectView):
permanent = False # Not always redirect to the same page
def get_redirect_url(self, *args, **kwargs):
# article = get_object_or_404(Article, pk=kwargs['pk'])
# article.update_counter()
# return super().get_redirect_url(*args, **kwargs)
token_storage = LoginTokenStorage(self.request.user)
c = OutlookConnection(
get_local_key("o365_login_app_settings.o365_app_client_id"),
get_local_key("o365_login_app_settings.o365_app_secret"),
token_storage,
redirect_url='https://%s/django_outlook/login_result/' % self.request.get_host()
)
auth_url = c.get_auth_url()
return auth_url
class OutlookLoginResultView(TemplateView):
template_name = 'django_outlook/key_got.html'
def get_context_data(self, **kwargs):
# return super(OutlookLoginResultView, self).get_context_data(**kwargs)
# param = retrieve_param(self.request)
token_storage = LoginTokenStorage(self.request.user)
c = OutlookConnection(
get_local_key("o365_login_app_settings.o365_app_client_id"),
get_local_key("o365_login_app_settings.o365_app_secret"),
token_storage,
redirect_url='https://%s/django_outlook/login_result/' % self.request.get_host()
)
token_url = "%s/?%s" % ("https://localhost", self.request.META['QUERY_STRING'])
param = retrieve_param(self.request)
res = c.update_extra_data(token_url, param["state"])
return res
|
PypiClean
|
/zippyshare-0.0.7.tar.gz/zippyshare-0.0.7/README.md
|
# zippyshare
<badges>[PyPI](https://pypi.org/project/zippyshare/)
[Donate](https://paypal.me/foxe6)</badges>
<i>zippyshare API for batch remote upload.</i>
# Hierarchy
```
zippyshare
```
# Example
## python
See `test`.
|
PypiClean
|
/sync_music-0.5.0.tar.gz/sync_music-0.5.0/README.rst
|
.. image:: https://travis-ci.com/fetzerch/sync_music.svg?branch=master
:target: https://travis-ci.com/fetzerch/sync_music
:alt: Travis CI Status
.. image:: https://coveralls.io/repos/github/fetzerch/sync_music/badge.svg?branch=master
:target: https://coveralls.io/github/fetzerch/sync_music?branch=master
:alt: Coveralls Status
.. image:: https://img.shields.io/pypi/v/sync_music.svg
:target: https://pypi.org/project/sync_music
:alt: PyPI Version
sync_music - Sync music library to external devices
===================================================
This program allows you to synchronize your music library for the usage
on primitive music players that don't support the diversity of your
collection.
In normal operation mode, *sync_music* performs its synchronization tasks
depending on the input file format. Music files in FLAC, Ogg Vorbis and M4A
format are transcoded to MP3. MP3 audio files and other files are
transferred unchanged. Filenames are adapted where necessary to comply
with the FAT32 format. If preferred, *sync_music* can also forcefully
transcode all files in order to save disk space. Another operation mode
applies volume normalization based on ReplayGain_ tags.
Transcoding is a time-consuming operation, so the first run of
*sync_music* can take several minutes. In subsequent runs, however, it will
only process files that changed in the source. To optimize the detection of
file changes, the script stores and compares a hash built from a fixed-size
block at the beginning of each file.
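The change detection can be pictured with a short sketch (simplified; the block
size and hash algorithm used by *sync_music* itself are assumptions here)::

    import hashlib

    def partial_hash(path, block_size=4096):
        """Hash a fixed-size block at the beginning of a file."""
        with open(path, 'rb') as handle:
            return hashlib.sha256(handle.read(block_size)).hexdigest()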
Besides audio files, *sync_music* is also able to export M3U playlists to
the destination folder. Absolute paths are hereby replaced with relative
paths in addition to the FAT32 filename adaptations.
Dependencies
------------
- Python >= 3.6
- FFmpeg_ (for transcoding to MP3)
- Mutagen_ >= 1.29 (for tag manipulation)
Installation
------------
The first step is to install FFmpeg_. Most Linux distributions offer packages
that can be directly installed. On Ubuntu 18.04 or later
there's an official package that can simply be installed using::
# apt install ffmpeg
Then *sync_music* can be installed from PyPI with::
# pip3 install sync_music
The following command installs the current development version::
# pip3 install https://github.com/fetzerch/sync_music/archive/master.zip
Usage
-----
Quick start
^^^^^^^^^^^
The following basic command synchronizes all audio files from the source to the
destination directory::
sync_music --audio-src=<FOLDER> --audio-dest=<FOLDER>
Additionally M3U playlist syncing can be enabled by specifying the path to the
playlists::
sync_music --audio-src=<FOLDER> --audio-dest=<FOLDER> --playlist-src=<FOLDER>
Besides that *sync_music* supports a number of advanced options. A full list of
supported options is available in the built in help message::
sync_music --help
Transcoding
^^^^^^^^^^^
The operation mode can be changed with the ``--mode`` parameter.
In ``transcode`` mode MP3 files are transcoded as well (instead of just copied
to the destination)::
sync_music --audio-src=<FOLDER> --audio-dest=<FOLDER> --mode=transcode
Transcoding MP3 files can lead to significantly smaller files if the source
contains many 320kbps CBR MP3s as the target rate is 190kbps VBR. The drawback
is that transcoding is slower and needs more CPU power.
The ``replaygain`` and ``replaygain-album`` modes apply (track or album) based
volume normalization from ReplayGain_ tags when transcoding::
sync_music --audio-src=<FOLDER> --audio-dest=<FOLDER> --mode=replaygain
Transcoding modes require that the MP3 files can be decoded by FFMpeg_ without
issues. Problematic input files can be analyzed and fixed for example with
`MP3 Diags`_.
Hacks
^^^^^
Some media players don't properly support album artist tags. This restriction
can be bypassed by writing the album artist information into the artist field.
This can be enabled by adding the ``--albumartist-artist-hack`` parameter.
Some media players don't properly support album artist tags, but they do
support the composer field. This restriction can be bypassed by writing
the album artist information into the composer field. This can be
enabled by adding the ``--albumartist-composer-hack`` parameter.
Some media players don't properly support artist tags. This restriction
can be bypassed by writing the artist information into the album artist field.
This can be enabled by adding the ``--artist-albumartist-hack`` parameter.
Some media players don't properly support disc number tags with tracks numbered
starting with 1 for every disc. The user typically wants to group them by disc
and not by track position. This can be solved by creating a different album for
each disc. With the ``--discnumber-hack`` option, the disc number is appended
to the album field.
Some media players don't properly support track number tags containing the
total number of tracks on the disk. With the ``--tracknumber-hack`` option, the
track total is removed from the track number field.
License
-------
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
`GNU General Public License <http://www.gnu.org/licenses/gpl-2.0.html>`_
for more details.
.. _FFMpeg: https://ffmpeg.org
.. _`MP3 Diags`: http://mp3diags.sourceforge.net
.. _Mutagen: https://mutagen.readthedocs.io
.. _ReplayGain: https://en.wikipedia.org/wiki/ReplayGain
|
PypiClean
|
/esdrt.content-1.74.3.zip/esdrt.content-1.74.3/esdrt/content/vocabularies.py
|
import itertools
from plone import api
from zope.interface import implementer
from zope.schema.interfaces import IVocabularyFactory
from zope.schema.vocabulary import SimpleVocabulary
import esdrt.content.constants as C
from esdrt.content.utils import request_context
def mk_term(key, value):
return SimpleVocabulary.createTerm(key, key, value)
@implementer(IVocabularyFactory)
class MSVocabulary(object):
def __call__(self, context):
pvoc = api.portal.get_tool('portal_vocabularies')
voc = pvoc.getVocabularyByName('eea_member_states')
terms = []
if voc is not None:
for key, value in voc.getVocabularyLines():
# create a term - the arguments are the value, the token, and
# the title (optional)
terms.append(SimpleVocabulary.createTerm(key, key, value))
return SimpleVocabulary(terms)
@implementer(IVocabularyFactory)
class GHGSourceCategory(object):
def __call__(self, context):
pvoc = api.portal.get_tool('portal_vocabularies')
voc = pvoc.getVocabularyByName('ghg_source_category')
terms = []
if voc is not None:
for key, value in voc.getVocabularyLines():
# create a term - the arguments are the value, the token, and
# the title (optional)
terms.append(SimpleVocabulary.createTerm(key, key, value))
return SimpleVocabulary(terms)
@implementer(IVocabularyFactory)
class GHGSourceSectors(object):
def __call__(self, context):
pvoc = api.portal.get_tool('portal_vocabularies')
voc = pvoc.getVocabularyByName('ghg_source_sectors')
terms = []
if voc is not None:
for key, value in voc.getVocabularyLines():
# create a term - the arguments are the value, the token, and
# the title (optional)
terms.append(SimpleVocabulary.createTerm(key, key, value))
return SimpleVocabulary(terms)
@implementer(IVocabularyFactory)
class Gas(object):
def __call__(self, context):
pvoc = api.portal.get_tool('portal_vocabularies')
voc = pvoc.getVocabularyByName('gas')
terms = []
if voc is not None:
for key, value in voc.getVocabularyLines():
# create a term - the arguments are the value, the token, and
# the title (optional)
terms.append(SimpleVocabulary.createTerm(key, key, value))
return SimpleVocabulary(terms)
@implementer(IVocabularyFactory)
class Fuel(object):
def __call__(self, context):
pvoc = api.portal.get_tool('portal_vocabularies')
voc = pvoc.getVocabularyByName('fuel')
terms = []
if voc is not None:
for key, value in voc.getVocabularyLines():
# create a term - the arguments are the value, the token, and
# the title (optional)
terms.append(SimpleVocabulary.createTerm(key, key, value))
return SimpleVocabulary(terms)
@implementer(IVocabularyFactory)
class Highlight(object):
def __call__(self, context):
pvoc = api.portal.get_tool('portal_vocabularies')
voc = pvoc.getVocabularyByName('highlight')
# In some cases (such as a form group) the context can be a dict or
# something else that's not a true Plone context.
# Attempt to get the true context from the request.
context = request_context(context)
terms = []
if voc is not None:
from esdrt.content.reviewfolder import ReviewFolderMixin
# [refs #159093]
internal_flags = getattr(context, "internal_highlights", []) or []
can_view_internal_flags = (
ReviewFolderMixin.can_view_internal_flags()
)
# [refs #159094]
excluded_highlights = getattr(
context, "excluded_highlights", []) or []
for key, value in voc.getVocabularyLines():
# [refs #159093]
if key in internal_flags and not can_view_internal_flags:
continue
# [refs #159094]
if key in excluded_highlights:
continue
terms.append(SimpleVocabulary.createTerm(key, key, value))
return SimpleVocabulary(terms)
@implementer(IVocabularyFactory)
class HighlightSelect(object):
""" Clean version of the highlight vocabulary,
used to filter the actual highlight vocabulary """
def __call__(self, context):
pvoc = api.portal.get_tool('portal_vocabularies')
voc = pvoc.getVocabularyByName('highlight')
terms = []
if voc is not None:
for key, value in voc.getVocabularyLines():
terms.append(SimpleVocabulary.createTerm(key, key, value))
return SimpleVocabulary(terms)
@implementer(IVocabularyFactory)
class Parameter(object):
def __call__(self, context):
pvoc = api.portal.get_tool('portal_vocabularies')
voc = pvoc.getVocabularyByName('parameter')
terms = []
if voc is not None:
for key, value in voc.getVocabularyLines():
# create a term - the arguments are the value, the token, and
# the title (optional)
terms.append(SimpleVocabulary.createTerm(key, key, value))
return SimpleVocabulary(terms)
@implementer(IVocabularyFactory)
class StatusFlag(object):
def __call__(self, context):
pvoc = api.portal.get_tool('portal_vocabularies')
voc = pvoc.getVocabularyByName('status_flag')
terms = []
if voc is not None:
for key, value in voc.getVocabularyLines():
# create a term - the arguments are the value, the token, and
# the title (optional)
terms.append(SimpleVocabulary.createTerm(key, key, value))
return SimpleVocabulary(terms)
from .crf_code_matching import crf_codes
@implementer(IVocabularyFactory)
class CRFCode(object):
def __call__(self, context):
terms = []
crfcodes = crf_codes()
for key, value in crfcodes.items():
# create a term - the arguments are the value, the token, and
# the title (optional)
terms.append(SimpleVocabulary.createTerm(key, key, value['title']))
return SimpleVocabulary(terms)
@implementer(IVocabularyFactory)
class Conclusions(object):
def __call__(self, context):
pvoc = api.portal.get_tool('portal_vocabularies')
voc = pvoc.getVocabularyByName('conclusion_reasons')
terms = []
if voc is not None:
for key, value in voc.getVocabularyLines():
# create a term - the arguments are the value, the token, and
# the title (optional)
terms.append(SimpleVocabulary.createTerm(key, key, value))
return SimpleVocabulary(terms)
@implementer(IVocabularyFactory)
class ConclusionsPhase2(object):
def __call__(self, context):
pvoc = api.portal.get_tool('portal_vocabularies')
voc = pvoc.getVocabularyByName('conclusion_phase2_reasons')
terms = []
if voc is not None:
for key, value in voc.getVocabularyLines():
# create a term - the arguments are the value, the token, and
# the title (optional)
terms.append(SimpleVocabulary.createTerm(key, key, value))
return SimpleVocabulary(terms)
@implementer(IVocabularyFactory)
class Roles(object):
def __call__(self, context):
terms = list(itertools.starmap(
mk_term, [
('Manager', 'Manager'),
(C.ROLE_SE, 'Sector Expert'),
(C.ROLE_RE, 'Review Expert'),
(C.ROLE_QE, 'Quality Expert'),
(C.ROLE_LR, 'Lead Reviewer'),
(C.ROLE_RP1, 'Reviewer Phase 1'),
(C.ROLE_RP2, 'Reviewer Phase 2'),
(C.ROLE_MSA, 'MS Authority'),
(C.ROLE_MSE, 'MS Expert'),
]))
return SimpleVocabulary(terms)
|
PypiClean
|
/procustodibus_agent-1.3.3-py3-none-any.whl/procustodibus_agent/mfa/cli.py
|
from docopt import docopt
from procustodibus_agent import __version__ as version
from procustodibus_agent.cnf import Cnf
from procustodibus_agent.mfa import check_mfa, do_mfa, list_mfa, read_password
def main():
"""Tool entry point."""
args = docopt(__doc__)
if args["--version"]:
# print version to stdout
print("procustodibus-mfa " + version) # noqa: T201
elif args["--auth"]:
authenticate(
args["--endpoint"],
args["--auth"],
args["--password-fd"],
args["--secondary-code"],
args["--secondary-fd"],
args["--secondary-prompt"],
args["--config"],
args["--verbosity"] or args["-v"],
)
elif args["--check"]:
check(args["--endpoint"], args["--config"])
else:
run(args["--config"], args["--verbosity"] or args["-v"])
# simpler to keep this logic together rather than subdivide it more functions
def authenticate( # noqa: CFQ002
endpoint,
user,
password_input=None,
secondary_code=None,
secondary_input=None,
secondary_prompt=False,
cnf_file="",
verbosity=None,
):
"""Authenticates the specified user for the specified endpoint.
Arguments:
endpoint (str): Endpoint ID.
user (str): User ID.
password_input (str): Optional file descriptor from which to read password.
secondary_code (str): Optional secondary verification code.
secondary_input (str): Optional file descriptor from which to read code.
secondary_prompt (bool): True to prompt for code.
cnf_file (str): Optional path to config file.
verbosity: Root log level.
"""
if secondary_prompt:
secondary_prompt = "Secondary code: "
cnf = Cnf(cnf_file, verbosity)
password = read_password(None, password_input, True)
secondary_code = read_password(secondary_code, secondary_input, secondary_prompt)
do_mfa(cnf, endpoint, user, password, secondary_code)
def check(endpoint, cnf_file=""):
"""Checks the MFA state of the specified endpoint.
Arguments:
endpoint (str): Endpoint ID.
cnf_file (str): Path to config file (optional).
"""
cnf = Cnf(cnf_file)
check_mfa(cnf, endpoint)
def run(*args):
"""Lists MFA state for all endpoints of the configured host.
Arguments:
*args (list): List of arguments to pass to Cnf constructor.
"""
cnf = Cnf(*args)
list_mfa(cnf)
if __name__ == "__main__":
main()
|
PypiClean
|
/MindsDB-23.8.3.0.tar.gz/MindsDB-23.8.3.0/mindsdb/integrations/handlers/ludwig_handler/ludwig_handler.py
|
from typing import Optional
import dill
import dask
import pandas as pd
from ludwig.automl import auto_train
from mindsdb.integrations.libs.base import BaseMLEngine
from .utils import RayConnection
class LudwigHandler(BaseMLEngine):
"""
Integration with the Ludwig declarative ML library.
""" # noqa
name = 'ludwig'
def create(self, target: str, df: Optional[pd.DataFrame] = None, args: Optional[dict] = None) -> None:
args = args['using'] # ignore the rest of the problem definition
# TODO: filter out incompatible use cases (e.g. time series won't work currently)
# TODO: enable custom values via `args` (mindful of local vs cloud)
user_config = {'hyperopt': {'executor': {'gpu_resources_per_trial': 0, 'num_samples': 3}}} # no GPU for now
with RayConnection():
results = auto_train(
dataset=df,
target=target,
tune_for_memory=False,
time_limit_s=120,
user_config=user_config,
# output_directory='./',
# random_seed=42,
# use_reference_config=False,
# kwargs={}
)
model = results.best_model
args['dtype_dict'] = {f['name']: f['type'] for f in model.base_config['input_features']}
args['accuracies'] = {'metric': results.experiment_analysis.best_result['metric_score']}
self.model_storage.json_set('args', args)
self.model_storage.file_set('model', dill.dumps(model))
def predict(self, df, args=None):
model = dill.loads(self.model_storage.file_get('model'))
with RayConnection():
predictions = self._call_model(df, model)
return predictions
@staticmethod
def _call_model(df, model):
predictions = dask.compute(model.predict(df)[0])[0]
target_name = model.config['output_features'][0]['column']
if target_name not in df:
predictions.columns = [target_name]
else:
predictions.columns = ['prediction']
predictions[f'{target_name}_explain'] = None
joined = df.join(predictions)
if 'prediction' in joined:
joined = joined.rename({
target_name: f'{target_name}_original',
'prediction': target_name
}, axis=1)
return joined
|
PypiClean
|
/skeem-0.1.0.tar.gz/skeem-0.1.0/doc/development.rst
|
###########
Development
###########
*******
Sandbox
*******
Acquire sources, create Python virtualenv, install package and dependencies,
and run software tests::
git clone https://github.com/daq-tools/skeem
cd skeem
python3 -m venv .venv
source .venv/bin/activate
pip install --use-pep517 --prefer-binary --editable=.[test,develop,release,scientific]
# Run linter and regular test suite.
poe check
# Run "roadrunner" tests, using a bunch of external resources. The tests will
# only check for a successful invocation, and not verify the generated SQL.
poe test-roadrunner
************
Code tracing
************
Skeem uses `Hunter`_ for code tracing, in order to make it easy to identify
hot spots visually. `Hunter`_ is a flexible code tracing toolkit, for
debugging, logging, inspection and other nefarious purposes.
For tracing function invocations through ``skeem`` and important 3rd-party
modules, use the ``--trace-modules=`` option. Examples:
- ``--trace-modules=frictionless`` will trace code execution for the
``frictionless`` module.
- ``--trace-modules=skeem,frictionless,pandas`` will trace code execution for
the ``skeem``, ``frictionless``, and ``pandas`` modules.
- ``--trace-modules=machinery`` has a special meaning, and will resolve to the
module list ``["skeem", "fastparquet", "frictionless", "fsspec", "pandas"]``.
****************
Build OCI images
****************
OCI images will be automatically published to the GitHub Container Registry
(GHCR), see `Skeem packages on GHCR`_. If you want to build images on your
machine, you can use those commands::
export DOCKER_BUILDKIT=1
export COMPOSE_DOCKER_CLI_BUILD=1
export BUILDKIT_PROGRESS=plain
docker build --tag local/skeem-standard --file release/oci/standard.Dockerfile .
docker build --tag local/skeem-full --file release/oci/full.Dockerfile .
::
docker run --rm -it local/skeem-standard skeem --version
docker run --rm -it local/skeem-standard skeem info
.. _Hunter: https://pypi.org/project/hunter/
.. _Skeem packages on GHCR: https://github.com/orgs/daq-tools/packages?repo_name=skeem
|
PypiClean
|
/test-imufusion-0.0.77.tar.gz/test-imufusion-0.0.77/LICENSE.md
|
The MIT License (MIT)
Copyright (c) 2021 x-io Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
PypiClean
|
/align-0.0.11.tar.gz/align-0.0.11/README.md
|
# ALIGN, a computational tool for multi-level language analysis (optimized for Python 3)
`align` is a Python library for extracting quantitative, reproducible
metrics of multi-level alignment between two speakers in naturalistic
language corpora. The method was introduced in "ALIGN: Analyzing
Linguistic Interactions with Generalizable techNiques" (Duran, Paxton, &
Fusaroli, 2019; Psychological Methods).
<!--
## Try out `align` with Binder
Interested in seeing how `align` works, but not sure if you want to install it
yet? Try it out through Binder. Click the "launch" button to get a complete
cloud environment to try out the ALIGN pipeline on our Python tutorials (the CHILDES
tutorial is currently the only one fully operational). The process for Binder to launch may
take several minutes.
[](https://mybinder.org/v2/gh/nickduran/align-linguistic-alignment/master)
-->
## Installation
`align` may be downloaded directly using `pip`.
To download the stable version released on PyPI:
```
pip install align
```
To download directly from our GitHub repo:
```
pip install git+https://github.com/nickduran/align-linguistic-alignment.git
```
## Additional tools required for some `align` options
The Google News pre-trained word2vec vectors (`GoogleNews-vectors-negative300.bin`)
and the Stanford part-of-speech tagger (`stanford-postagger-full-2018-10-16`)
are required for some optional `align` parameters but must be downloaded
separately.
* Google News: https://code.google.com/archive/p/word2vec/ (page) or
https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit?usp=sharing
(direct download)
* Stanford POS tagger: https://nlp.stanford.edu/software/tagger.shtml#Download (page)
or https://nlp.stanford.edu/software/stanford-postagger-full-2018-10-16.zip
(direct download)
## Tutorials
We created Jupyter Notebook tutorials to provide an easily accessible
step-by-step walkthrough on how to use `align`. Below are descriptions of the
current tutorials that can be found in the `examples` directory within this
repository. If unfamiliar with Jupyter Notebooks, instructions for installing
and running can be found here: http://jupyter.org/install. We recommend installing
Jupyter using Anaconda, a widely used Python data science platform that helps
streamline workflows. Anaconda also makes it easy to set up isolated Python
environments, which may be necessary to run `align` and the tutorials, since
`align` is currently optimized for Python 3.
* **Jupyter Notebook 1: CHILDES**
* This tutorial walks users through an analysis of conversations from a
single English corpus from the CHILDES database (MacWhinney,
2000)---specifically, Kuczaj’s Abe corpus (Kuczaj, 1976). We analyze the
last 20 conversations in the corpus in order to explore how ALIGN can be
used to track multi-level linguistic alignment between a parent and child
over time, which may be of interest to developmental language researchers.
Specifically, we explore how alignment between a parent and a child
changes over a brief span of developmental trajectory.
* **Jupyter Notebook 2: Devil's Advocate**
* This tutorial walks users through the analysis reported in Duran,
Paxton, and Fusaroli (2019). The corpus consists of 94 written
transcripts of conversations, lasting eight minutes each, collected from
an experimental study of truthful and deceptive communication. The goal
of the study was to examine interpersonal linguistic alignment between
dyads across two conversations where participants either agreed or
disagreed with each other (as a randomly assigned between-dyads condition)
and where one of the conversations involved the truth and the other
deception (as a within-subjects condition).
We are in the process of adding more tutorials and would welcome additional
tutorials by interested contributors.
## Attribution
If you find the package useful, please cite our manuscript:
>Duran, N., Paxton, A., & Fusaroli, R. (2019). ALIGN: Analyzing
> Linguistic Interactions with Generalizable techNiques. *Psychological Methods*. http://dynamicog.org/papers/
## Licensing of example data
* **CHILDES**
* Example corpus "Kuczaj Corpus" by Stan Kuczaj is licensed under a
Creative Commons Attribution-ShareAlike 3.0 Unported License
(https://childes.talkbank.org/access/Eng-NA/Kuczaj.html):
> Kuczaj, S. (1977). The acquisition of regular and irregular past tense
> forms. *Journal of Verbal Learning and Verbal Behavior, 16*, 589–600.
* **Devil's Advocate**
* The complete de-identified dataset of raw conversational transcripts
is hosted on a secure protected-access repository provided by the
Inter-university Consortium for Political and Social Research
(ICPSR). Please click on the link to access: http://dx.doi.org/10.3886/ICPSR37124.v1.
Due to the requirements of our IRB, please note that users interested in
obtaining these data must complete a Restricted Data Use Agreement, specify
the reason for the request, and obtain IRB approval or notice of exemption for their research.
> Duran, Nicholas, Alexandra Paxton, and Riccardo
> Fusaroli. Conversational Transcripts of Truthful and
> Deceptive Speech Involving Controversial Topics,
> Central California, 2012. ICPSR37124-v1. Ann Arbor,
> MI: Inter-university Consortium for Political and
> Social Research [distributor], 2018-08-29.
|
PypiClean
|
/covmatic_robotmanager-0.0.6-py3-none-any.whl/covmatic_robotmanager/positions.py
|
import json
import os
import logging
class Positions:
def __init__(self,
positions_file_path: str,
create_file: bool = False,
logger=logging.getLogger("robotmanager.positions")):
self._logger = logger
self._abs_path = os.path.abspath(positions_file_path)
self._logger.info("Checking path {}...".format(self._abs_path))
if not os.path.exists(self._abs_path):
if create_file:
self._logger.info("Position file not existing... Creating a new one.")
with open(self._abs_path, "w") as fp:
json.dump(dict(), fp)
else:
raise Exception("Position file passed must exist: {}".format(self._abs_path))
with open(self._abs_path, "r") as fp:
self._positions = json.load(fp)
self._logger.debug("Loaded positions: {}".format(self._positions))
def save(self, name, field, data):
self._logger.info("Saving position name {} field {} data {}".format(name, field, data))
if name not in self._positions:
self._logger.info("Creating position {}".format(name))
self._positions[name] = {}
self._positions[name][field] = data
self._save_positions()
def save_joints(self, name, data):
self.save(name, "joints", data)
def save_xyz(self, name, data):
self.save(name, "xyz", data)
def get_position(self, name: str):
self._logger.info("Requested position {}".format(name))
if name in self._positions:
return self._positions[name]
raise Exception("Position {} not found.".format(name))
def get_joints(self, name):
return self.get_position(name)["joints"]
def get_xyz(self, name):
return self.get_position(name)["xyz"]
def _save_positions(self):
self._logger.info("Saving positions to file {}".format(self._abs_path))
with open(self._abs_path, "w") as fp:
json.dump(self._positions, fp)
@staticmethod
def get_pos_owner(name: str):
return name.split("-")[0]
|
PypiClean
|
/google-cloud-aiplatform-1.31.1.tar.gz/google-cloud-aiplatform-1.31.1/google/cloud/aiplatform_v1/types/context.py
|
from __future__ import annotations
from typing import MutableMapping, MutableSequence
import proto # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1",
manifest={
"Context",
},
)
class Context(proto.Message):
r"""Instance of a general context.
Attributes:
name (str):
Immutable. The resource name of the Context.
display_name (str):
User provided display name of the Context.
May be up to 128 Unicode characters.
etag (str):
An eTag used to perform consistent
read-modify-write updates. If not set, a blind
"overwrite" update happens.
labels (MutableMapping[str, str]):
The labels with user-defined metadata to
organize your Contexts.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed. No more than 64 user labels can be
associated with one Context (System labels are
excluded).
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this Context was
created.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Timestamp when this Context was
last updated.
parent_contexts (MutableSequence[str]):
Output only. A list of resource names of Contexts that are
parents of this Context. A Context may have at most 10
parent_contexts.
schema_title (str):
The title of the schema describing the
metadata.
Schema title and version is expected to be
registered in earlier Create Schema calls. And
both are used together as unique identifiers to
identify schemas within the local metadata
store.
schema_version (str):
The version of the schema in schema_name to use.
Schema title and version is expected to be registered in
earlier Create Schema calls. And both are used together as
unique identifiers to identify schemas within the local
metadata store.
metadata (google.protobuf.struct_pb2.Struct):
Properties of the Context.
Top level metadata keys' heading and trailing
spaces will be trimmed. The size of this field
should not exceed 200KB.
description (str):
Description of the Context
"""
name: str = proto.Field(
proto.STRING,
number=1,
)
display_name: str = proto.Field(
proto.STRING,
number=2,
)
etag: str = proto.Field(
proto.STRING,
number=8,
)
labels: MutableMapping[str, str] = proto.MapField(
proto.STRING,
proto.STRING,
number=9,
)
create_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=10,
message=timestamp_pb2.Timestamp,
)
update_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=11,
message=timestamp_pb2.Timestamp,
)
parent_contexts: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=12,
)
schema_title: str = proto.Field(
proto.STRING,
number=13,
)
schema_version: str = proto.Field(
proto.STRING,
number=14,
)
metadata: struct_pb2.Struct = proto.Field(
proto.MESSAGE,
number=15,
message=struct_pb2.Struct,
)
description: str = proto.Field(
proto.STRING,
number=16,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
PypiClean
|
/rios.core-0.9.0.tar.gz/rios.core-0.9.0/src/rios/core/validation/interaction.py
|
import colander
from .common import ValidationError, sub_schema, LanguageTag, \
IdentifierString, Options, LocalizedString, DescriptorList, \
LocalizationChecker, validate_instrument_version, guard, guard_sequence, \
MetadataCollection, RE_PRODUCT_TOKENS
from .instrument import InstrumentReference, TYPES_COMPLEX, \
get_full_type_definition
__all__ = (
'Interaction',
)
STEP_TYPES_ALL = (
'question',
'text',
)
METADATA_PROPS = {
'author': colander.SchemaNode(
colander.String(),
),
'copyright': colander.SchemaNode(
colander.String(),
),
'homepage': colander.SchemaNode(
colander.String(),
validator=colander.url,
),
'generator': colander.SchemaNode(
colander.String(),
validator=colander.Regex(RE_PRODUCT_TOKENS),
),
}
# pylint: disable=abstract-method
class StepType(colander.SchemaNode):
schema_type = colander.String
validator = colander.OneOf(STEP_TYPES_ALL)
class TextStepOptions(colander.SchemaNode):
text = LocalizedString()
def __init__(self, *args, **kwargs):
kwargs['typ'] = colander.Mapping(unknown='raise')
super(TextStepOptions, self).__init__(*args, **kwargs)
class QuestionStepOptions(colander.SchemaNode):
fieldId = IdentifierString() # noqa: N815
text = LocalizedString()
error = LocalizedString(missing=colander.drop)
enumerations = DescriptorList(missing=colander.drop)
def __init__(self, *args, **kwargs):
kwargs['typ'] = colander.Mapping(unknown='raise')
super(QuestionStepOptions, self).__init__(*args, **kwargs)
STEP_TYPE_OPTION_VALIDATORS = {
'question': QuestionStepOptions(),
'text': TextStepOptions(),
}
class Step(colander.SchemaNode):
type = StepType()
options = Options(missing=colander.drop)
def __init__(self, *args, **kwargs):
kwargs['typ'] = colander.Mapping(unknown='raise')
super(Step, self).__init__(*args, **kwargs)
def validator(self, node, cstruct):
step_type = cstruct.get('type', None)
validator = STEP_TYPE_OPTION_VALIDATORS.get(step_type, None)
options = cstruct.get('options', None)
if validator:
sub_schema(
validator,
node.get('options'),
options,
)
elif options is not None:
raise ValidationError(
node.get('options'),
'"%s" step do not accept options' % step_type,
)
class StepList(colander.SequenceSchema):
step = Step()
validator = colander.Length(min=1)
class Threshold(colander.SchemaNode):
schema_type = colander.Integer
validator = colander.Range(min=1)
class TimeoutDetails(colander.SchemaNode):
threshold = Threshold()
text = LocalizedString()
def __init__(self, *args, **kwargs):
kwargs['typ'] = colander.Mapping(unknown='raise')
super(TimeoutDetails, self).__init__(*args, **kwargs)
class Timeout(colander.SchemaNode):
warn = TimeoutDetails(missing=colander.drop)
abort = TimeoutDetails(missing=colander.drop)
def __init__(self, *args, **kwargs):
kwargs['typ'] = colander.Mapping(unknown='raise')
super(Timeout, self).__init__(*args, **kwargs)
def validator(self, node, cstruct):
if not cstruct.get('warn') and not cstruct.get('abort'):
raise ValidationError(
node,
'At least one of "warn" or "abort" must be defined',
)
class Interaction(colander.SchemaNode):
instrument = InstrumentReference()
defaultLocalization = LanguageTag() # noqa: N815
defaultTimeout = Timeout(missing=colander.drop) # noqa: N815
steps = StepList()
meta = MetadataCollection(
METADATA_PROPS,
missing=colander.drop,
)
def __init__(self, *args, **kwargs):
self.instrument = kwargs.pop('instrument', None)
kwargs['typ'] = colander.Mapping(unknown='raise')
super(Interaction, self).__init__(*args, **kwargs)
def validator(self, node, cstruct):
self._check_localizations(node, cstruct)
if not self.instrument:
return
validate_instrument_version(
self.instrument,
cstruct,
node.get('instrument'),
)
self._check_fields_covered(node, cstruct)
self._check_type_specifics(node, cstruct)
def _check_localizations(self, node, cstruct):
with guard(node.get('defaultTimeout')) as dtnode:
timeouts = cstruct.get('defaultTimeout', {})
for level in ('warn', 'abort'):
if level in timeouts:
checker = LocalizationChecker(
dtnode.get(level),
cstruct['defaultLocalization'],
)
checker.ensure(
timeouts[level],
'text',
scope='Timeout %s Text' % level,
)
for sidx, step in enumerate(cstruct['steps']):
with guard_sequence(node, 'step', sidx) as snode:
if 'options' not in step: # pragma: no cover
return
checker = LocalizationChecker(
snode.get('options'),
cstruct['defaultLocalization'],
)
options = step['options']
checker.ensure(options, 'text', scope='Step Text')
checker.ensure(options, 'error', scope='Step Error')
for enumeration in options.get('enumerations', []):
checker.ensure_descriptor(enumeration, scope='Enumeration')
def _check_fields_covered(self, node, cstruct):
instrument_fields = set([
field['id']
for field in self.instrument['record']
])
intr_fields = set()
for step in cstruct['steps']:
if step['type'] != 'question':
continue
field_id = step['options']['fieldId']
if field_id in intr_fields:
raise ValidationError(
node.get('steps'),
'Field "%s" is addressed by more than one question' % (
field_id,
)
)
intr_fields.add(field_id)
missing = instrument_fields - intr_fields
if missing:
raise ValidationError(
node.get('steps'),
'There are Instrument fields which are missing: %s' % (
', '.join(missing),
)
)
extra = intr_fields - instrument_fields
if extra:
raise ValidationError(
node.get('steps'),
'There are extra fields referenced by questions: %s' % (
', '.join(extra),
)
)
def _get_instrument_field(self, name):
for field in self.instrument['record']:
if field['id'] == name:
return field
return None
def _check_type_specifics(self, node, cstruct):
for sidx, step in enumerate(cstruct['steps']):
with guard_sequence(node, 'step', sidx) as snode:
if step['type'] != 'question':
continue
type_def = get_full_type_definition(
self.instrument,
self._get_instrument_field(
step['options']['fieldId'],
)['type'],
)
if type_def['base'] in TYPES_COMPLEX:
raise ValidationError(
snode.get('options'),
'Complex Instrument Types are not allowed in'
' Interactions',
)
if 'enumerations' in step['options']:
if type_def['base'] in ('enumeration', 'enumerationSet'):
described_choices = [
desc['id']
for desc in step['options']['enumerations']
]
actual_choices = list(type_def['enumerations'].keys())
for described_choice in described_choices:
if described_choice not in actual_choices:
raise ValidationError(
snode.get('options'),
'Field "%s" describes an invalid'
' enumeration "%s"' % (
step['options']['fieldId'],
described_choice,
),
)
else:
raise ValidationError(
snode.get('options'),
'Field "%s" cannot have an enumerations'
' configuration' % (
step['options']['fieldId'],
),
)
|
PypiClean
|
/rawdatx-0.1.zip/rawdatx-0.1/docs/index.rst
|
.. rawdatx documentation master file
rawdatx
=======
rawdatx is a Python 2.7, 3.4, and 3.5 compatible converter that generates Excel xlsx files
from TOA5 comma-separated text files produced by Campbell Scientific LoggerNet.
Sensor input, processing instructions, and output structure are specified
in a single XML Definition File that also serves as documentation.
Installation
------------
The following prerequisites need to be installed:
* Python 2.7, 3.4, or 3.5
* numpy 1.9 or higher
* xlsxwriter
optionally (recommended):
* lxml
* asteval
The easiest way to install rawdatx is through pip:
``pip install rawdatx``
Alternatively, download the latest version from the repository
`<https://github.com/cpetrich/rawdatx>`_ and install with
``python setup.py install``.
Usage
-----
To convert a TOA5 file to XLSX, run the following script::
import rawdatx.read_TOA5 as read_raw_data
import rawdatx.process_XML as process_XML
config = './config.cfg'
read_raw_data.main(config)
process_XML.main(config)
Input and output files are specified in a UTF-8 encoded
configuration file ``config.cfg``:
.. code-block:: ini
[RawData]
raw_data_path = ./raw-data/
mask = CR1000_*.dat
logger_time_zone = UTC+1
[Metadata]
Project = My project name
[Files]
xml_map_path = ./
xml_map = data_map.xml
data_path = ./
processed_data_xlsx = processed_data.xlsx
xml_dtd_out = data_map.dtd
raw_data = consolidated_raw_data.npy
processed_data_npy = processed_data.npy
The ``[RawData]`` section specifies the location of the logger input files,
the ``[Metadata]`` section defines metadata entries copied into the
XLSX file, and the ``[Files]`` section specifies path and file names of
output and intermediate files (``data_path``) and input
XML Definition File (``xml_map_path`` and ``xml_map``).
The XML Definition File (``data_map.xml``) may look like this:
.. code-block:: xml
<?xml version="1.0" encoding="UTF-8" ?>
<measurements from="2015/05/03 11:45">
<group name="Logger">
<map name="Battery Voltage" unit="V" src="Batt_V" />
<map name="Internal Temperature" unit="°C" src="T_panel" />
</group>
<group name="Weather">
<map name="Air Temperature" unit="°C" src="T_air" />
<map name="Relative Humidity" unit="%" src="RH" />
<map name="Wind Speed" unit="m/s" src="Wind_speed" />
<map name="Wind Direction" unit="°" src="Wind_direction" />
</group>
</measurements>
See also examples and test files in the repository at
`<https://github.com/cpetrich/rawdatx>`_.
Background
==========
Availability
------------
The code is available under the MIT license.
The project is hosted at `<https://github.com/cpetrich/rawdatx>`_
and packages are available on PyPI at `<https://pypi.python.org/pypi/rawdatx/>`_.
Documentation is available at `<https://rawdatx.readthedocs.org/>`_.
Author
------
Chris Petrich
Contents
========
.. toctree::
:maxdepth: 2
xml_definition
config_file
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
|
PypiClean
|
/django-allauth-james-0.20.0.tar.gz/django-allauth-james-0.20.0/allauth/account/auth_backends.py
|
from django.contrib.auth.backends import ModelBackend
from django.db.models import Q
from ..utils import get_user_model
from .app_settings import AuthenticationMethod
from . import app_settings
class AuthenticationBackend(ModelBackend):
def authenticate(self, **credentials):
ret = None
if app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.EMAIL:
ret = self._authenticate_by_email(**credentials)
elif app_settings.AUTHENTICATION_METHOD \
== AuthenticationMethod.USERNAME_EMAIL:
ret = self._authenticate_by_email(**credentials)
if not ret:
ret = self._authenticate_by_username(**credentials)
else:
ret = self._authenticate_by_username(**credentials)
return ret
def _authenticate_by_username(self, **credentials):
username_field = app_settings.USER_MODEL_USERNAME_FIELD
username = credentials.get('username')
password = credentials.get('password')
User = get_user_model()
if not username_field or username is None or password is None:
return None
try:
# Username query is case insensitive
query = {username_field+'__iexact': username}
user = User.objects.get(**query)
if user.check_password(password):
return user
except User.DoesNotExist:
return None
def _authenticate_by_email(self, **credentials):
# Even though allauth will pass along `email`, other apps may
# not respect this setting. For example, when using
# django-tastypie basic authentication, the login is always
# passed as `username`. So let's play nice with other apps
# and use username as a fallback.
User = get_user_model()
email = credentials.get('email', credentials.get('username'))
if email:
users = User.objects.filter(Q(email__iexact=email)
| Q(emailaddress__email__iexact=email))
for user in users:
if user.check_password(credentials["password"]):
return user
return None
|
PypiClean
|
/python-sources-3.10.5.tar.gz/python-sources-3.10.5/Python-3.10.5/Doc/library/numbers.rst
|
:mod:`numbers` --- Numeric abstract base classes
================================================
.. module:: numbers
:synopsis: Numeric abstract base classes (Complex, Real, Integral, etc.).
**Source code:** :source:`Lib/numbers.py`
--------------
The :mod:`numbers` module (:pep:`3141`) defines a hierarchy of numeric
:term:`abstract base classes <abstract base class>` which progressively define
more operations. None of the types defined in this module are intended to be instantiated.
.. class:: Number
The root of the numeric hierarchy. If you just want to check if an argument
*x* is a number, without caring what kind, use ``isinstance(x, Number)``.
The numeric tower
-----------------
.. class:: Complex
Subclasses of this type describe complex numbers and include the operations
that work on the built-in :class:`complex` type. These are: conversions to
:class:`complex` and :class:`bool`, :attr:`.real`, :attr:`.imag`, ``+``,
``-``, ``*``, ``/``, ``**``, :func:`abs`, :meth:`conjugate`, ``==``, and
``!=``. All except ``-`` and ``!=`` are abstract.
.. attribute:: real
Abstract. Retrieves the real component of this number.
.. attribute:: imag
Abstract. Retrieves the imaginary component of this number.
.. abstractmethod:: conjugate()
Abstract. Returns the complex conjugate. For example, ``(1+3j).conjugate()
== (1-3j)``.
.. class:: Real
To :class:`Complex`, :class:`Real` adds the operations that work on real
numbers.
In short, those are: a conversion to :class:`float`, :func:`math.trunc`,
:func:`round`, :func:`math.floor`, :func:`math.ceil`, :func:`divmod`, ``//``,
``%``, ``<``, ``<=``, ``>``, and ``>=``.
Real also provides defaults for :func:`complex`, :attr:`~Complex.real`,
:attr:`~Complex.imag`, and :meth:`~Complex.conjugate`.
.. class:: Rational
Subtypes :class:`Real` and adds
:attr:`~Rational.numerator` and :attr:`~Rational.denominator` properties, which
should be in lowest terms. With these, it provides a default for
:func:`float`.
.. attribute:: numerator
Abstract.
.. attribute:: denominator
Abstract.
.. class:: Integral
Subtypes :class:`Rational` and adds a conversion to :class:`int`. Provides
defaults for :func:`float`, :attr:`~Rational.numerator`, and
:attr:`~Rational.denominator`. Adds abstract methods for :func:`pow` with
modulus and bit-string operations: ``<<``, ``>>``, ``&``, ``^``, ``|``,
``~``.
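The built-in numeric types are registered against these ABCs, so ordinary
:func:`isinstance` checks reflect the tower. For example (an illustrative
sketch, not an exhaustive list)::

   >>> from numbers import Complex, Real, Rational, Integral
   >>> from fractions import Fraction
   >>> isinstance(5, Integral)
   True
   >>> isinstance(Fraction(1, 3), Rational)
   True
   >>> isinstance(1.5, Real), isinstance(1.5, Rational)
   (True, False)
   >>> isinstance(2j, Complex), isinstance(2j, Real)
   (True, False)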
Notes for type implementors
---------------------------
Implementors should be careful to make equal numbers equal and hash
them to the same values. This may be subtle if there are two different
extensions of the real numbers. For example, :class:`fractions.Fraction`
implements :func:`hash` as follows::
def __hash__(self):
if self.denominator == 1:
# Get integers right.
return hash(self.numerator)
# Expensive check, but definitely correct.
if self == float(self):
return hash(float(self))
else:
# Use tuple's hash to avoid a high collision rate on
# simple fractions.
return hash((self.numerator, self.denominator))
Adding More Numeric ABCs
~~~~~~~~~~~~~~~~~~~~~~~~
There are, of course, more possible ABCs for numbers, and this would
be a poor hierarchy if it precluded the possibility of adding
those. You can add ``MyFoo`` between :class:`Complex` and
:class:`Real` with::
class MyFoo(Complex): ...
MyFoo.register(Real)
.. _implementing-the-arithmetic-operations:
Implementing the arithmetic operations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We want to implement the arithmetic operations so that mixed-mode
operations either call an implementation whose author knew about the
types of both arguments, or convert both to the nearest built in type
and do the operation there. For subtypes of :class:`Integral`, this
means that :meth:`__add__` and :meth:`__radd__` should be defined as::
class MyIntegral(Integral):
def __add__(self, other):
if isinstance(other, MyIntegral):
return do_my_adding_stuff(self, other)
elif isinstance(other, OtherTypeIKnowAbout):
return do_my_other_adding_stuff(self, other)
else:
return NotImplemented
def __radd__(self, other):
if isinstance(other, MyIntegral):
return do_my_adding_stuff(other, self)
elif isinstance(other, OtherTypeIKnowAbout):
return do_my_other_adding_stuff(other, self)
elif isinstance(other, Integral):
return int(other) + int(self)
elif isinstance(other, Real):
return float(other) + float(self)
elif isinstance(other, Complex):
return complex(other) + complex(self)
else:
return NotImplemented
There are 5 different cases for a mixed-type operation on subclasses
of :class:`Complex`. I'll refer to all of the above code that doesn't
refer to ``MyIntegral`` and ``OtherTypeIKnowAbout`` as
"boilerplate". ``a`` will be an instance of ``A``, which is a subtype
of :class:`Complex` (``a : A <: Complex``), and ``b : B <:
Complex``. I'll consider ``a + b``:
1. If ``A`` defines an :meth:`__add__` which accepts ``b``, all is
well.
2. If ``A`` falls back to the boilerplate code, and it were to
return a value from :meth:`__add__`, we'd miss the possibility
that ``B`` defines a more intelligent :meth:`__radd__`, so the
boilerplate should return :const:`NotImplemented` from
:meth:`__add__`. (Or ``A`` may not implement :meth:`__add__` at
all.)
3. Then ``B``'s :meth:`__radd__` gets a chance. If it accepts
``a``, all is well.
4. If it falls back to the boilerplate, there are no more possible
methods to try, so this is where the default implementation
should live.
5. If ``B <: A``, Python tries ``B.__radd__`` before
``A.__add__``. This is ok, because it was implemented with
knowledge of ``A``, so it can handle those instances before
delegating to :class:`Complex`.
If ``A <: Complex`` and ``B <: Real`` without sharing any other knowledge,
then the appropriate shared operation is the one involving the built
in :class:`complex`, and both :meth:`__radd__` s land there, so ``a+b
== b+a``.
Because most of the operations on any given type will be very similar,
it can be useful to define a helper function which generates the
forward and reverse instances of any given operator. For example,
:class:`fractions.Fraction` uses::
def _operator_fallbacks(monomorphic_operator, fallback_operator):
def forward(a, b):
if isinstance(b, (int, Fraction)):
return monomorphic_operator(a, b)
elif isinstance(b, float):
return fallback_operator(float(a), b)
elif isinstance(b, complex):
return fallback_operator(complex(a), b)
else:
return NotImplemented
forward.__name__ = '__' + fallback_operator.__name__ + '__'
forward.__doc__ = monomorphic_operator.__doc__
def reverse(b, a):
if isinstance(a, Rational):
# Includes ints.
return monomorphic_operator(a, b)
elif isinstance(a, numbers.Real):
return fallback_operator(float(a), float(b))
elif isinstance(a, numbers.Complex):
return fallback_operator(complex(a), complex(b))
else:
return NotImplemented
reverse.__name__ = '__r' + fallback_operator.__name__ + '__'
reverse.__doc__ = monomorphic_operator.__doc__
return forward, reverse
def _add(a, b):
"""a + b"""
return Fraction(a.numerator * b.denominator +
b.numerator * a.denominator,
a.denominator * b.denominator)
__add__, __radd__ = _operator_fallbacks(_add, operator.add)
# ...
|
PypiClean
|
/apache-superset-red-0.34.1.tar.gz/apache-superset-red-0.34.1/superset/views/annotations.py
|
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import gettext as __
from flask_babel import lazy_gettext as _
from wtforms.validators import StopValidation
from superset import appbuilder
from superset.models.annotations import Annotation, AnnotationLayer
from .base import DeleteMixin, SupersetModelView
class StartEndDttmValidator(object):
"""
Validates dttm fields.
"""
def __call__(self, form, field):
if not form["start_dttm"].data and not form["end_dttm"].data:
raise StopValidation(_("annotation start time or end time is required."))
elif (
form["end_dttm"].data
and form["start_dttm"].data
and form["end_dttm"].data < form["start_dttm"].data
):
raise StopValidation(
_("Annotation end time must be no earlier than start time.")
)
class AnnotationModelView(SupersetModelView, DeleteMixin): # noqa
datamodel = SQLAInterface(Annotation)
list_title = _("List Annotation")
show_title = _("Show Annotation")
add_title = _("Add Annotation")
edit_title = _("Edit Annotation")
list_columns = ["layer", "short_descr", "start_dttm", "end_dttm"]
edit_columns = [
"layer",
"short_descr",
"long_descr",
"start_dttm",
"end_dttm",
"json_metadata",
]
add_columns = edit_columns
label_columns = {
"layer": _("Layer"),
"short_descr": _("Short Descr"),
"start_dttm": _("Start Dttm"),
"end_dttm": _("End Dttm"),
"long_descr": _("Long Descr"),
"json_metadata": _("JSON Metadata"),
}
description_columns = {
"json_metadata": "This JSON represents any additional metadata this \
annotation needs to add more context."
}
validators_columns = {"start_dttm": [StartEndDttmValidator()]}
def pre_add(self, obj):
if not obj.start_dttm:
obj.start_dttm = obj.end_dttm
elif not obj.end_dttm:
obj.end_dttm = obj.start_dttm
def pre_update(self, obj):
self.pre_add(obj)
class AnnotationLayerModelView(SupersetModelView, DeleteMixin):
datamodel = SQLAInterface(AnnotationLayer)
list_title = _("List Annotation Layer")
show_title = _("Show Annotation Layer")
add_title = _("Add Annotation Layer")
edit_title = _("Edit Annotation Layer")
list_columns = ["id", "name"]
edit_columns = ["name", "descr"]
add_columns = edit_columns
label_columns = {"name": _("Name"), "descr": _("Description")}
appbuilder.add_view(
AnnotationLayerModelView,
"Annotation Layers",
label=__("Annotation Layers"),
icon="fa-comment",
category="Manage",
category_label=__("Manage"),
category_icon="",
)
appbuilder.add_view(
AnnotationModelView,
"Annotations",
label=__("Annotations"),
icon="fa-comments",
category="Manage",
category_label=__("Manage"),
category_icon="",
)
|
PypiClean
|
/pyqtool-0.0.6.tar.gz/pyqtool-0.0.6/sizzle/selector.py
|
import regex
from collections import namedtuple
Attr = namedtuple('Attr', 'lft op rgt')
Pseudo = namedtuple('Pseudo', 'name value')
class Selector(object):
DESCENDANT = ' '
CHILD = '>'
SIBLING = '~'
ADJACENT = '+'
NOT_SET = None
class RE(object):
id = r'_?[A-Za-z0-9_]+|_'
ws = r'[\x20\t\r\n\f]*'
comma = '^{ws},{ws}'.format(ws=ws)
combinator = r'^{ws}([>+~ ]){ws}'.format(ws=ws)
type_selector = '({id})'.format(id=id)
id_selector = '#({id})'.format(id=id)
class_selector = r'\.(' + id + ')'
pseudo_selector = r'(:({id})\(([^()]+|(?1)?)\))'.format(id=id)
attr_selector = r'\[{ws}({id}){ws}([*^$|!~]?=)(.*?)\]'.format(
id=id, ws=ws)
selector = '(?:(?:{typ})?({id}|{cls}|{pseudo}|{attr})+|{typ})'.format(
typ=id, id=id_selector, cls=class_selector, pseudo=pseudo_selector,
attr=attr_selector)
def __init__(self, name, combinator=None):
self.name = name
self.combinator = combinator
self.next_selector = None
selector_patterns = {
'types': self.RE.type_selector,
'ids': self.RE.id_selector,
'classes': self.RE.class_selector,
'pseudos': self.RE.pseudo_selector,
'attrs': self.RE.attr_selector,
}
matches = {}
while True:
pattern_matched = False
for key, pattern in selector_patterns.items():
match = regex.search(r'^{}'.format(pattern), name)
if match:
i, pos = match.span()
if key not in matches:
matches[key] = []
matches[key].append(match.groups())
name = name[pos:]
pattern_matched = True
if not pattern_matched:
break
self.typ = None
for types in matches.pop('types', []):
self.typ = types[0]
self.id_ = None
for ids in matches.pop('ids', []):
self.id_ = ids[0]
self.classes = [a[0] for a in matches.pop('classes', [])]
self.attrs = [
Attr(l, o, r.strip())
for l, o, r in matches.pop('attrs', [])
]
self.pseudos = [
Pseudo(*a[1:])
for a in matches.pop('pseudos', [])
]
def __repr__(self):
return 'Selector <{}>'.format(self.name)
@classmethod
def parse(cls, string):
selectors = []
combinator = None
prev_selector = None
while True:
match = regex.search(cls.RE.comma, string)
if match:
# skip comma
_, pos = match.span()
string = string[pos:]
continue
match = regex.search(cls.RE.combinator, string)
if match:
_, pos = match.span()
combinator = string[:pos].strip()
string = string[pos:]
else:
combinator = None
match = regex.search(cls.RE.selector, string)
if match:
_, pos = match.span()
seltext = string[:pos]
string = string[pos:]
selector = cls(seltext, combinator=combinator)
if combinator is not None and prev_selector:
prev_selector.next_selector = prev_selector = selector
else:
prev_selector = selector
selectors.append(selector)
continue
break
return selectors
|
PypiClean
|
/robologs_ros_utils-0.1.1a26-py3-none-any.whl/robologs_ros_utils/utils/file_utils/file_utils.py
|
import glob
import json
import os
import shutil
import tarfile
import uuid
from typing import Union
from zipfile import ZipFile
def split_folder_path_to_list(path: str) -> list:
"""
This function splits a path into a list.
Args:
path (str): file path
Returns: A list with path components
"""
path = os.path.normpath(path)
return path.split(os.sep)
def create_directory(path: str, delete_if_exists: bool = False) -> None:
"""
This function creates a directory.
Args:
path (str): directory path
delete_if_exists (bool): if True, existing directory will be deleted
Returns: None
"""
if delete_if_exists:
if os.path.exists(path):
shutil.rmtree(path)
if not os.path.exists(path):
os.makedirs(path)
return
def check_file_exists(path: str) -> None:
"""
This function checks if a file exists, and
raises an exception if not
Args:
path (str): input file path
Returns: None
"""
if not os.path.exists(path):
raise Exception(f"{path} does not exist.")
return
def save_json(data: Union[dict, list], path: str) -> None:
"""
This function saves a list or dict to a JSON file.
Args:
data (dict or list): data to serialize
path (str): output JSON file path
Returns: None
"""
with open(path, "w") as f_json:
json.dump(data, f_json, indent=4, sort_keys=True)
return
def read_json(json_path: str):
"""
This function reads a json file and return a JSON object
Args:
json_path (str): JSON file path
Returns: JSON object
"""
with open(json_path) as json_file:
data = json.load(json_file)
return data
def create_uuid() -> str:
"""
This function returns a UUID
Returns: UUID
"""
return str(uuid.uuid4())
def find_sub_folder(sub_folder_name: str, search_path: str) -> list:
"""
This function finds directories that contain a sub-folder with the given name.
Args:
sub_folder_name (str): name of the sub-folder to look for
search_path (str): path of the folder to be searched
Returns: A list of directory paths that contain the sub-folder
"""
result = []
for root, dirs, _files in os.walk(search_path):
if sub_folder_name in dirs:
result.append(root)
return result
def unzip_file_to_folder(path_zip_file: str, output_folder: str) -> None:
"""
This function unzips a file to a specific folder location.
Args:
path_zip_file (str): absolute path of .zip file
output_folder (str): absolute path of output folder
Returns: None
"""
with ZipFile(path_zip_file, "r") as zipObj:
zipObj.extractall(output_folder)
return
def untar_file_to_folder(path_tar_file: str, output_folder: str) -> None:
"""
This function untars a file to a specific folder location.
Args:
path_tar_file (str): absolute path of .tar file
output_folder (str): absolute path of output folder
Returns: None
"""
tar_file = tarfile.open(path_tar_file)
tar_file.extractall(output_folder)
tar_file.close()
return
def get_all_files_of_type_in_directory(input_folder: str, file_format: str) -> list:
"""
This function gets a list of all files of type "file_format" in a directory and its immediate subfolders.
Args:
input_folder (str): input folder path
file_format (str): file extension to look for, e.g. "csv"
Returns: list of matching file paths
"""
subfolder_list = glob.glob(f'{input_folder}/*/')
file_string = f"./*.{file_format}"
ll = list()
# look for files in subfolders
for entry in subfolder_list:
ll = ll + sorted(glob.glob(os.path.abspath(os.path.join(entry, file_string))))
# look for files in folder
ll = ll + sorted(glob.glob(os.path.abspath(os.path.join(input_folder, file_string))))
return ll
def find_substring_path(input_folder: str, substring: str) -> list:
"""
This function returns files whose names contain a certain substring
Args:
input_folder (str): input folder
substring (str): substring to look for in file names
Returns: list of matching file paths
"""
glob_str = f"{input_folder}*{substring}*"
return glob.glob(glob_str)
def delete_files_of_type(input_folder: str, file_format_list: list = [".jpg", ".png"]) -> None:
"""
This function deletes all files of the given types from a folder.
Args:
input_folder (str): input folder
file_format_list (list): list of file extensions to be deleted, e.g. [".jpg", ".png"]
Returns: None
"""
for file_format in file_format_list:
for filename in sorted(glob.glob(os.path.join(input_folder, f"./*{file_format}"))):
os.remove(filename)
return
|
PypiClean
|
/discord.py_self-2.0.0-py3-none-any.whl/discord/file.py
|
from __future__ import annotations
from base64 import b64encode
from hashlib import md5
import io
import os
from typing import Any, Dict, Optional, Tuple, Union
from .utils import MISSING, cached_slot_property
# fmt: off
__all__ = (
'File',
)
# fmt: on
def _strip_spoiler(filename: str) -> Tuple[str, bool]:
stripped = filename
while stripped.startswith('SPOILER_'):
stripped = stripped[8:] # len('SPOILER_')
spoiler = stripped != filename
return stripped, spoiler
class File:
r"""A parameter object used for :meth:`abc.Messageable.send`
for sending file objects.
.. note::
File objects are single use and are not meant to be reused in
multiple :meth:`abc.Messageable.send`\s.
Attributes
-----------
fp: Union[:class:`os.PathLike`, :class:`io.BufferedIOBase`]
A file-like object opened in binary mode and read mode
or a filename representing a file in the hard drive to
open.
.. note::
If the file-like object passed is opened via ``open`` then the
modes 'rb' should be used.
To pass binary data, consider usage of ``io.BytesIO``.
spoiler: :class:`bool`
Whether the attachment is a spoiler. If left unspecified, the :attr:`~File.filename` is used
to determine if the file is a spoiler.
description: Optional[:class:`str`]
The file description to display, currently only supported for images.
.. versionadded:: 2.0
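Example of constructing a file from in-memory bytes (a minimal usage sketch)::

    import io
    file = discord.File(io.BytesIO(b'binary data'), filename='data.bin')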
"""
__slots__ = ('fp', '_filename', 'spoiler', 'description', '_original_pos', '_owner', '_closer', '_cs_md5')
def __init__(
self,
fp: Union[str, bytes, os.PathLike[Any], io.BufferedIOBase],
filename: Optional[str] = None,
*,
spoiler: bool = MISSING,
description: Optional[str] = None,
):
if isinstance(fp, io.IOBase):
if not (fp.seekable() and fp.readable()):
raise ValueError(f'File buffer {fp!r} must be seekable and readable')
self.fp: io.BufferedIOBase = fp
self._original_pos = fp.tell()
self._owner = False
else:
self.fp = open(fp, 'rb')
self._original_pos = 0
self._owner = True
# aiohttp only uses two methods from IOBase (read and close)
# Since I want to control when the files close,
# I need to stub it so it doesn't close unless I tell it to
self._closer = self.fp.close
self.fp.close = lambda: None
if filename is None:
if isinstance(fp, str):
_, filename = os.path.split(fp)
else:
filename = getattr(fp, 'name', 'untitled')
self._filename, filename_spoiler = _strip_spoiler(filename)
if spoiler is MISSING:
spoiler = filename_spoiler
self.spoiler: bool = spoiler
self.description: Optional[str] = description
@property
def filename(self) -> str:
""":class:`str`: The filename to display when uploading to Discord.
If this is not given then it defaults to ``fp.name`` or if ``fp`` is
a string then the ``filename`` will default to the string given.
"""
return 'SPOILER_' + self._filename if self.spoiler else self._filename
@filename.setter
def filename(self, value: str) -> None:
self._filename, self.spoiler = _strip_spoiler(value)
@cached_slot_property('_cs_md5')
def md5(self) -> str:
try:
return b64encode(md5(self.fp.read()).digest()).decode('utf-8')
finally:
self.reset()
def reset(self, *, seek: Union[int, bool] = True) -> None:
# The `seek` parameter is needed because
# the retry-loop is iterated over multiple times
# starting from 0, as an implementation quirk
# the resetting must be done at the beginning
# before a request is done, since the first index
# is 0, and thus false, then this prevents an
# unnecessary seek since it's the first request
# done.
if seek:
self.fp.seek(self._original_pos)
def close(self) -> None:
self.fp.close = self._closer
if self._owner:
self._closer()
def to_dict(self, index: int) -> Dict[str, Any]:
payload = {
'id': index,
'filename': self.filename,
}
if self.description is not None:
payload['description'] = self.description
return payload
|
PypiClean
|
/nanodeep-0.0.3.tar.gz/nanodeep-0.0.3/bream4/toolkit/procedure_components/sequencing_features/saturation_watcher.py
|
from __future__ import annotations
from collections import defaultdict
from typing import Optional
import pandas as pd
from bream4.toolkit.procedure_components.feature_manager import ChannelNotifierState
class SaturationWatcher:
"""Used to watch and record when channels become saturated. Example:
* Instantiate
* With a feature manager...
* Use update_conditions whenever the well configuration changes (Can't be streamed from minknow)
* You can also pass any extra information that you want to be recorded when saturation is triggered
* Check results with how_many_saturated or get the raw df (self.saturation_df)
Example:
>>> x = SaturationWatcher(extra_columns=['voltage'])
>>> x.update_conditions(channel_configuration={100: 1}, voltage=55)
# x receives a saturation event on channel 100 from the feature manager
>>> x.update_conditions(channel_configuration={100: 2, 200: 2})
# x receives a saturation event on channel 100 from the feature manager
>>> x.update_conditions(voltage=66)
# x receives a saturation event on channel 200 from the feature manager
>>> x.saturation_df
channel | well | state | switched_off_time | voltage
--------+------+-----------+-------------------+--------
100 | 1 | saturated | 2.4 | 55
100 | 2 | saturated | 3.0 | 55
200 | 2 | saturated | 3.1 | 66
>>> x.how_many_saturated(well=[1])
1
>>> x.how_many_saturated(well=[2])
2
>>> x.how_many_saturated(well=[2], voltage=[55])
1
"""
def __init__(self, sample_rate: int, extra_columns: Optional[list[str]] = None):
"""Initialise the saturation watcher
:param sample_rate: int of sample rate. Used to convert sample timings to seconds
:param extra_columns: Any extra columns that are expected
"""
self.extra_columns = extra_columns if extra_columns is not None else []
self.columns = ["channel", "well", "state", "switched_off_time"]
self.columns.extend(self.extra_columns)
# Stores information about any extra information provided by update_conditions
self.extra_info = {}
self.saturation_df = pd.DataFrame([], columns=self.columns)
self.states_to_watch = ["saturated"]
# Stores what the channel configuration should be
self.channel_configuration = defaultdict(int)
# Used to convert trigger_time from samples to seconds
self.sample_rate = float(sample_rate)
def update_conditions(self, channel_configuration: Optional[dict[int, int]] = None, **kwargs) -> None:
"""Update the conditions that are stored when a saturation channel state occurs
If kwargs are specified these will get added to the df.
:param channel_configuration: What new wells channels are going to be in. dict(channel-> well)
:param kwargs: Any extra information you want saved in the df when saturation happens
"""
if channel_configuration is not None:
self.channel_configuration.update(channel_configuration)
if kwargs:
for (k, v) in kwargs.items():
if k not in self.extra_info:
self.extra_info[k] = [v]
else:
self.extra_info[k].append(v)
def execute(self, states: Optional[dict[int, ChannelNotifierState]] = None) -> None:
if states:
new_rows = []
for (channel, state) in states.items():
if state.state_name in self.states_to_watch:
new_item = {
"channel": channel,
"well": self.channel_configuration[channel],
"state": state.state_name,
"switched_off_time": state.trigger_time / self.sample_rate,
}
# Make sure any extra columns are present even if the value hasn't been established
new_item.update({k: None for k in self.extra_columns})
# Update with any info that we do have
new_item.update({k: v[-1] for (k, v) in self.extra_info.items()})
new_rows.append(new_item)
add_df = pd.DataFrame(new_rows, columns=self.saturation_df.columns)
if self.saturation_df.empty:
self.saturation_df = add_df
else:
self.saturation_df = self.saturation_df.append(add_df, ignore_index=True)
def how_many_saturated(self, **kwargs) -> int:
"""Return how many channels have currently been saturated given some criteria.
:param kwargs: Which extra_info to filter on. dict(item->list)
"""
subset = self.saturation_df
if kwargs:
for (key, value) in kwargs.items():
subset = subset[subset[key].isin(value)]
return len(subset)
|
PypiClean
|
/nnisgf-0.4-py3-none-manylinux1_x86_64.whl/nnisgf-0.4.data/data/nni/node_modules/moment/locale/de.js
|
;(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined'
&& typeof require === 'function' ? factory(require('../moment')) :
typeof define === 'function' && define.amd ? define(['../moment'], factory) :
factory(global.moment)
}(this, (function (moment) { 'use strict';
function processRelativeTime(number, withoutSuffix, key, isFuture) {
var format = {
'm': ['eine Minute', 'einer Minute'],
'h': ['eine Stunde', 'einer Stunde'],
'd': ['ein Tag', 'einem Tag'],
'dd': [number + ' Tage', number + ' Tagen'],
'M': ['ein Monat', 'einem Monat'],
'MM': [number + ' Monate', number + ' Monaten'],
'y': ['ein Jahr', 'einem Jahr'],
'yy': [number + ' Jahre', number + ' Jahren']
};
return withoutSuffix ? format[key][0] : format[key][1];
}
var de = moment.defineLocale('de', {
months : 'Januar_Februar_März_April_Mai_Juni_Juli_August_September_Oktober_November_Dezember'.split('_'),
monthsShort : 'Jan._Feb._März_Apr._Mai_Juni_Juli_Aug._Sep._Okt._Nov._Dez.'.split('_'),
monthsParseExact : true,
weekdays : 'Sonntag_Montag_Dienstag_Mittwoch_Donnerstag_Freitag_Samstag'.split('_'),
weekdaysShort : 'So._Mo._Di._Mi._Do._Fr._Sa.'.split('_'),
weekdaysMin : 'So_Mo_Di_Mi_Do_Fr_Sa'.split('_'),
weekdaysParseExact : true,
longDateFormat : {
LT: 'HH:mm',
LTS: 'HH:mm:ss',
L : 'DD.MM.YYYY',
LL : 'D. MMMM YYYY',
LLL : 'D. MMMM YYYY HH:mm',
LLLL : 'dddd, D. MMMM YYYY HH:mm'
},
calendar : {
sameDay: '[heute um] LT [Uhr]',
sameElse: 'L',
nextDay: '[morgen um] LT [Uhr]',
nextWeek: 'dddd [um] LT [Uhr]',
lastDay: '[gestern um] LT [Uhr]',
lastWeek: '[letzten] dddd [um] LT [Uhr]'
},
relativeTime : {
future : 'in %s',
past : 'vor %s',
s : 'ein paar Sekunden',
ss : '%d Sekunden',
m : processRelativeTime,
mm : '%d Minuten',
h : processRelativeTime,
hh : '%d Stunden',
d : processRelativeTime,
dd : processRelativeTime,
M : processRelativeTime,
MM : processRelativeTime,
y : processRelativeTime,
yy : processRelativeTime
},
dayOfMonthOrdinalParse: /\d{1,2}\./,
ordinal : '%d.',
week : {
dow : 1, // Monday is the first day of the week.
doy : 4 // The week that contains Jan 4th is the first week of the year.
}
});
return de;
})));
|
PypiClean
|
/parasail-1.3.4.tar.gz/parasail-1.3.4/README.rst
|
parasail-python
===============
Python Bindings for the Parasail C Library
Travis Build Status:
.. image:: https://travis-ci.org/jeffdaily/parasail-python.svg?branch=master
:alt: Build Status
PyPI Package:
.. image:: https://badge.fury.io/py/parasail.svg
:target: https://badge.fury.io/py/parasail
Author: Jeff Daily ([email protected])
Table of Contents
-----------------
- `Installation <#installation>`__
- `Using pip <#using-pip>`__
- `Testing <#testing>`__
- `Building from Source <#building-from-source>`__
- `Quick Example <#quick-example>`__
- `Standard Function Naming Convention <#standard-function-naming-convention>`__
- `Profile Function Naming Convention <#profile-function-naming-convention>`__
- `Substitution Matrices <#substitution-matrices>`__
- `SSW Library Emulation <#ssw-library-emulation>`__
- `Banded Global Alignment <#banded-global-alignment>`__
- `File Input <#file-input>`__
- `Tracebacks <#tracebacks>`__
- `Citing parasail <#citing-parasail>`__
- `License: Battelle BSD-style <#license-battelle-bsd-style>`__
This package contains Python bindings for
`parasail <https://github.com/jeffdaily/parasail>`__. Parasail is a SIMD
C (C99) library containing implementations of the Smith-Waterman
(local), Needleman-Wunsch (global), and semi-global pairwise sequence
alignment algorithms.
Installation
------------
`back to top <#table-of-contents>`__
Using pip
+++++++++
`back to top <#table-of-contents>`__
The recommended way of installing is to use the latest version available via pip.
::
pip install parasail
Binaries for Windows and OSX should be available via pip. Using pip on a Linux platform will first download the latest version of the parasail C library sources and then compile them automatically into a shared library. For an installation from sources, or to learn how the pip installation works on Linux, please read on.
Testing
+++++++
`back to top <#table-of-contents>`__
To run the testsuite use the unittest runner.
::
python -m unittest discover tests
Building from Source
++++++++++++++++++++
`back to top <#table-of-contents>`__
The parasail python bindings are based on ctypes. Unfortunately, best practices are not firmly established for providing cross-platform and user-friendly python bindings based on ctypes. The approach with parasail-python is to install the parasail shared library as "package data" and use a relative path from the parasail/__init__.py in order to locate the shared library.
There are two approaches currently supported. First, you can compile your own parasail shared library using one of the recommended build processes described in the parasail C library README.md, then copy the parasail.dll (Windows), libparasail.so (Linux), or libparasail.dylib (OSX) shared library to parasail-python/parasail -- the same folder location as parasail-python/parasail/__init__.py.
The second approach is to let the setup.py script attempt to download and compile the parasail C library for you using the configure script that comes with it. This happens as a side effect of the bdist_wheel target.
::
python setup.py bdist_wheel
The bdist_wheel target will first look for the shared library. If it exists, it will happily install it as package data. Otherwise, the latest parasail master branch from github will be downloaded, unzipped, configured, made, and the shared library will be copied into the appropriate location for package data installation.
The downloading and building of the parasail C library can be skipped if you set the environment variable PARASAIL_SKIP_BUILD to any value prior to running setup.py or pip install. At runtime during import, the parasail bindings will search for the parasail C library first in the package data location, then in standard system locations, and lastly by searching through the environment variables PARASAIL_LIBPATH, LD_LIBRARY_PATH, DYLD_LIBRARY_PATH, and PATH. For verbose output during this search, set PARASAIL_VERBOSE=1.
Quick Example
-------------
`back to top <#table-of-contents>`__
The Python interface only includes bindings for the dispatching
functions, not the low-level instruction set-specific function calls.
The Python interface also includes wrappers for the various PAM and
BLOSUM matrices included in the distribution.
Gap open and extension penalties are specified as positive integers. When any of the algorithms open a gap, only the gap open penalty alone is applied.
.. code:: python
import parasail
result = parasail.sw_scan_16("asdf", "asdf", 11, 1, parasail.blosum62)
result = parasail.sw_stats_striped_8("asdf", "asdf", 11, 1, parasail.pam100)
Be careful using the attributes of the Result object - especially on Result instances constructed on the fly. For example, calling `parasail.sw_trace("asdf", "asdf", 11, 1, parasail.blosum62).cigar.seq` returns a numpy.ndarray that wraps a pointer to memory that is invalid because the Cigar is deallocated before the `seq` statement. You can avoid this problem by assigning Result instances to variables as in the example above.
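A minimal sketch of the safe pattern described above, keeping the ``Result`` and its ``Cigar`` alive in variables before reading ``seq``:

.. code:: python

    import parasail

    result = parasail.sw_trace("asdf", "asdf", 11, 1, parasail.blosum62)
    cigar = result.cigar   # keep a reference so the wrapped memory stays valid
    seq = cigar.seq        # numpy array view over the cigar operations
    print(result.score, seq)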
Standard Function Naming Convention
-----------------------------------
`back to top <#table-of-contents>`__
There are many functions within the parasail library, but most are variations of the familiar main
algorithms. The following table describes the main algorithms and the shorthand name used for the function.
========================================================================================= =============
Algorithm Function Name
========================================================================================= =============
Smith-Waterman local alignment sw
Needleman-Wunsch global alignment nw
Semi-Global, do not penalize gaps at beginning of s1/query sg_qb
Semi-Global, do not penalize gaps at end of s1/query sg_qe
Semi-Global, do not penalize gaps at beginning and end of s1/query sg_qx
Semi-Global, do not penalize gaps at beginning of s2/database sg_db
Semi-Global, do not penalize gaps at end of s2/database sg_de
Semi-Global, do not penalize gaps at beginning and end of s2/database sg_dx
Semi-Global, do not penalize gaps at beginning of s1/query and end of s2/database sg_qb_de
Semi-Global, do not penalize gaps at beginning of s2/database and end of s1/query sg_qe_db
Semi-Global, do not penalize gaps at beginning of s1/query and beginning of s2/database sg_qb_db
Semi-Global, do not penalize gaps at end of s2/database and end of s1/query sg_qe_de
Semi-Global, do not penalize gaps at beginning and end of both sequences sg
========================================================================================= =============
A good summary of the various alignment algorithms can be found courtesy of Dr. Dannie Durand's course on
computational genomics `here <http://www.cs.cmu.edu/~durand/03-711/2015/Lectures/PW_sequence_alignment_2015.pdf>`_.
The same document was copied locally to the C library repo in case this link ever breaks (`link <https://github.com/jeffdaily/parasail/blob/master/contrib/PW_sequence_alignment_2015.pdf>`_).
To make it easier to find the function you're looking for, the function names follow a naming convention; a worked example is given after the patterns below. The following uses set notation {} to indicate that a selection must be made and brackets [] to indicate an optional part of the name.
- Non-vectorized, reference implementations.
- Required, select algorithm from table above.
- Optional return alignment statistics.
- Optional return DP table or last row/col.
- Optional use a prefix scan implementation.
- ``parasail. {nw,sg,sg_qb,sg_qe,sg_qx,sg_db,sg_de,sg_dx,sg_qb_de,sg_qe_db,sg_qb_db,sg_qe_de,sw} [_stats] [{_table,_rowcol}] [_scan]``
- Non-vectorized, traceback-capable reference implementations.
- Required, select algorithm from table above.
- Optional use a prefix scan implementation.
- ``parasail. {nw,sg,sg_qb,sg_qe,sg_qx,sg_db,sg_de,sg_dx,sg_qb_de,sg_qe_db,sg_qb_db,sg_qe_de,sw} _trace [_scan]``
- Vectorized.
- Required, select algorithm from table above.
- Optional return alignment statistics.
- Optional return DP table or last row/col.
- Required, select vectorization strategy -- striped is a good place to start, but scan is often faster for global alignment.
- Required, select solution width. 'sat' will attempt an 8-bit solution, but if overflow is detected it will then perform the 16-bit operation. This can be faster in some cases, though 16-bit is often sufficient.
- ``parasail. {nw,sg,sg_qb,sg_qe,sg_qx,sg_db,sg_de,sg_dx,sg_qb_de,sg_qe_db,sg_qb_db,sg_qe_de,sw} [_stats] [{_table,_rowcol}] {_striped,_scan,_diag} {_8,_16,_32,_64,_sat}``
- Vectorized, traceback-capable.
- Required, select algorithm from table above.
- Required, select vectorization strategy -- striped is a good place to start, but scan is often faster for global alignment.
- Required, select solution width. 'sat' will attempt an 8-bit solution, but if overflow is detected it will then perform the 16-bit operation. This can be faster in some cases, though 16-bit is often sufficient.
- ``parasail. {nw,sg,sg_qb,sg_qe,sg_qx,sg_db,sg_de,sg_dx,sg_qb_de,sg_qe_db,sg_qb_db,sg_qe_de,sw} _trace {_striped,_scan,_diag} {_8,_16,_32,_64,_sat}``
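As an illustration of the convention, the names below combine the pieces described above. These are sketches that assume the corresponding dispatchers are present in your build.
.. code:: python

    # non-vectorized reference with statistics and full DP table, prefix-scan variant
    result = parasail.nw_stats_table_scan("asdf", "asdf", 11, 1, parasail.blosum62)
    # vectorized, traceback-capable semi-global alignment, striped 16-bit
    result = parasail.sg_trace_striped_16("asdf", "asdf", 11, 1, parasail.blosum62)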
Profile Function Naming Convention
----------------------------------
`back to top <#table-of-contents>`__
It has been noted in the literature that some performance can be gained by reusing the query sequence when using the striped [Farrar, 2007] or scan [Daily, 2015] vector strategies. There is a special subset of functions that enables this behavior. For the striped and scan vector implementations *only*, a query profile can be created and reused for subsequent alignments. This can noticeably speed up applications such as database search.
- Profile creation
- Optional, prepare query profile for a function that returns statistics. Stats require additional data structures to be allocated.
- Required, select solution width. 'sat' will allocate profiles for both 8- and 16-bit solutions.
- ``parasail.profile_create [_stats] {_8,_16,_32,_64,_sat}``
- Profile use
- Vectorized.
- Required, select algorithm from table above.
- Optional return alignment statistics.
- Optional return DP table or last row/col.
- Required, select vectorization strategy -- striped is a good place to start, but scan is often faster for global alignment.
- Required, select solution width. 'sat' will attempt an 8-bit solution, but if overflow is detected it will then perform the 16-bit operation. This can be faster in some cases, though 16-bit is often sufficient.
- ``parasail. {nw,sg,sg_qb,sg_qe,sg_qx,sg_db,sg_de,sg_dx,sg_qb_de,sg_qe_db,sg_qb_db,sg_qe_de,sw} [_stats] [{_table,_rowcol}] {_striped,_scan} _profile {_8,_16,_32,_64,_sat}``
- Vectorized, traceback-capable.
- Required, select algorithm from table above.
- Required, select vectorization strategy -- striped is a good place to start, but scan is often faster for global alignment.
- Required, select solution width. 'sat' will attempt an 8-bit solution, but if overflow is detected it will then perform the 16-bit operation. This can be faster in some cases, though 16-bit is often sufficient.
- ``parasail. {nw,sg,sg_qb,sg_qe,sg_qx,sg_db,sg_de,sg_dx,sg_qb_de,sg_qe_db,sg_qb_db,sg_qe_de,sw} _trace {_striped,_scan} _profile {_8,_16,_32,_64,_sat}``
Please note that the bit size you select for creating the profile *must* match the bit size of the function you call. The example below uses a 16-bit profile and a 16-bit function.
.. code:: python
profile = parasail.profile_create_16("asdf", parasail.blosum62)
result1 = parasail.sw_trace_striped_profile_16(profile, "asdf", 10, 1)
result2 = parasail.nw_scan_profile_16(profile, "asdf", 10, 1)
Substitution Matrices
---------------------
`back to top <#table-of-contents>`__
parasail bundles a number of substitution matrices, including PAM and BLOSUM. To use them, look them up by name (useful for command-line parsing) or use them directly. For example
.. code:: python
print(parasail.blosum62)
matrix = parasail.Matrix("pam100")
You can also create your own matrices with simple match/mismatch values.
For more complex matrices, you can either copy a built-in matrix and modify
it, or start from a simple match/mismatch matrix and adjust values as needed. For example
.. code:: python
# copy a built-in matrix, then modify like a numpy array
matrix = parasail.blosum62.copy()
matrix[2,4] = 200
matrix[3,:] = 100
user_matrix = parasail.matrix_create("ACGT", 2, -1)
You can also parse a simple matrix file by passing its filename to ``parasail.Matrix``, provided the file is in the following format::
#
# Any line starting with '#' is a comment.
#
# Needs a row for the alphabet. First column is a repeat of the
# alphabet and assumed to be identical in order to the first alphabet row.
#
# Last row and column *must* be a non-alphabet character to represent
# any input sequence character that is outside of the alphabet.
#
A T G C S W R Y K M B V H D N U *
A 5 -4 -4 -4 -4 1 1 -4 -4 1 -4 -1 -1 -1 -2 -4 -5
T -4 5 -4 -4 -4 1 -4 1 1 -4 -1 -4 -1 -1 -2 5 -5
G -4 -4 5 -4 1 -4 1 -4 1 -4 -1 -1 -4 -1 -2 -4 -5
C -4 -4 -4 5 1 -4 -4 1 -4 1 -1 -1 -1 -4 -2 -4 -5
S -4 -4 1 1 -1 -4 -2 -2 -2 -2 -1 -1 -3 -3 -1 -4 -5
W 1 1 -4 -4 -4 -1 -2 -2 -2 -2 -3 -3 -1 -1 -1 1 -5
R 1 -4 1 -4 -2 -2 -1 -4 -2 -2 -3 -1 -3 -1 -1 -4 -5
Y -4 1 -4 1 -2 -2 -4 -1 -2 -2 -1 -3 -1 -3 -1 1 -5
K -4 1 1 -4 -2 -2 -2 -2 -1 -4 -1 -3 -3 -1 -1 1 -5
M 1 -4 -4 1 -2 -2 -2 -2 -4 -1 -3 -1 -1 -3 -1 -4 -5
B -4 -1 -1 -1 -1 -3 -3 -1 -1 -3 -1 -2 -2 -2 -1 -1 -5
V -1 -4 -1 -1 -1 -3 -1 -3 -3 -1 -2 -1 -2 -2 -1 -4 -5
H -1 -1 -4 -1 -3 -1 -3 -1 -3 -1 -2 -2 -1 -2 -1 -1 -5
D -1 -1 -1 -4 -3 -1 -1 -3 -1 -3 -2 -2 -2 -1 -1 -1 -5
N -2 -2 -2 -2 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -2 -5
U -4 5 -4 -4 -4 1 -4 1 1 -4 -1 -4 -1 -1 -2 5 -5
* -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5 -5
.. code:: python
matrix_from_filename = parasail.Matrix("filename.txt")
SSW Library Emulation
---------------------
`back to top <#table-of-contents>`__
The SSW library (https://github.com/mengyao/Complete-Striped-Smith-Waterman-Library) performs Smith-Waterman local alignment using SSE2 instructions and a striped vector. Its result provides the primary score, a secondary score, beginning and ending locations of the alignment for both the query and reference sequences, as well as a SAM CIGAR. There are a few parasail functions that emulate this behavior, with the only exception being that parasail does not calculate a secondary score.
.. code:: python
score_size = 1 # 0, use 8-bit align; 1, use 16-bit; 2, try both
profile = parasail.ssw_init("asdf", parasail.blosum62, score_size)
result = parasail.ssw_profile(profile, "asdf", 10, 1)
print(result.score1)
print(result.cigar)
print(result.ref_begin1)
print(result.ref_end1)
print(result.read_begin1)
print(result.read_end1)
# or skip profile creation
result = parasail.ssw("asdf", "asdf", 10, 1, parasail.blosum62)
Banded Global Alignment
-----------------------
`back to top <#table-of-contents>`__
There is one version of banded global alignment available. Though it is not vectorized, it might still be faster than using other parasail global alignment functions, especially for large sequences. The function signature is similar to the other parasail functions with the only exception being ``k``, the band width.
.. code:: python
band_size = 3
result = parasail.nw_banded("asdf", "asdf", 10, 1, band_size, parasail.blosum62)
File Input
----------
`back to top <#table-of-contents>`__
Parasail can parse FASTA, FASTQ, and gzipped versions of such files if
zlib was found during the C library build. The
function ``parasail.sequences_from_file`` will return a list-like object
containing Sequence instances. A parasail Sequence behaves like an
immutable string but also has extra attributes ``name``, ``comment``,
and ``qual``. These attributes will return an empty string if the input
file did not contain these fields.
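A minimal sketch of reading sequences, assuming a FASTA file named ``reads.fasta`` exists on disk:
.. code:: python

    sequences = parasail.sequences_from_file("reads.fasta")
    for s in sequences:
        # each Sequence behaves like an immutable string with extra attributes
        print(s.name, s.comment, s.qual, len(s))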
Tracebacks
----------
`back to top <#table-of-contents>`__
Parasail supports accessing a SAM CIGAR string from a result. You must use a traceback-capable alignment function. Refer to the C interface description above for details on how to use a traceback-capable alignment function.
.. code:: python
result = parasail.sw_trace("asdf", "asdf", 10, 1, parasail.blosum62)
cigar = result.cigar
# cigars have seq, len, beg_query, and beg_ref properties
# the seq property is encoded
print(cigar.seq)
# use decode attribute to return a decoded cigar string
print(cigar.decode)
Citing parasail
---------------
`back to top <#table-of-contents>`__
If needed, please cite the following paper.
Daily, Jeff. (2016). Parasail: SIMD C library for global, semi-global,
and local pairwise sequence alignments. *BMC Bioinformatics*, 17(1),
1-11. doi:10.1186/s12859-016-0930-z
http://dx.doi.org/10.1186/s12859-016-0930-z
License: Battelle BSD-style
---------------------------
`back to top <#table-of-contents>`__
Copyright (c) 2015, Battelle Memorial Institute
1. Battelle Memorial Institute (hereinafter Battelle) hereby grants
permission to any person or entity lawfully obtaining a copy of this
software and associated documentation files (hereinafter “the
Software”) to redistribute and use the Software in source and binary
forms, with or without modification. Such person or entity may use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and may permit others to do so, subject to
the following conditions:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimers.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
- Other than as used herein, neither the name Battelle Memorial
Institute or Battelle may be used in any form whatsoever without
the express written consent of Battelle.
- Redistributions of the software in any form, and publications
based on work performed using the software should include the
following citation as a reference:
Daily, Jeff. (2016). Parasail: SIMD C library for global,
semi-global, and local pairwise sequence alignments. *BMC
Bioinformatics*, 17(1), 1-11. doi:10.1186/s12859-016-0930-z
2. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL BATTELLE OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
PypiClean
|
/odoo12_addon_l10n_nl_tax_statement-12.0.2.0.3-py3-none-any.whl/odoo/addons/l10n_nl_tax_statement/wizard/l10n_nl_vat_statement_config_wizard.py
|
from odoo import api, fields, models
class VatStatementConfigWizard(models.TransientModel):
_name = 'l10n.nl.vat.statement.config.wizard'
_description = 'Netherlands Vat Statement Configuration Wizard'
tag_1a_omzet = fields.Many2one('account.account.tag')
tag_1a_btw = fields.Many2one('account.account.tag')
tag_1b_omzet = fields.Many2one('account.account.tag')
tag_1b_btw = fields.Many2one('account.account.tag')
tag_1c_omzet = fields.Many2one('account.account.tag')
tag_1c_btw = fields.Many2one('account.account.tag')
tag_1d_omzet = fields.Many2one('account.account.tag')
tag_1d_btw = fields.Many2one('account.account.tag')
tag_1e_omzet = fields.Many2one('account.account.tag')
tag_2a_omzet = fields.Many2one('account.account.tag')
tag_2a_btw = fields.Many2one('account.account.tag')
tag_3a_omzet = fields.Many2one('account.account.tag')
tag_3b_omzet = fields.Many2one('account.account.tag')
tag_3b_omzet_d = fields.Many2one('account.account.tag')
tag_3c_omzet = fields.Many2one('account.account.tag')
tag_4a_omzet = fields.Many2one('account.account.tag')
tag_4a_btw = fields.Many2one('account.account.tag')
tag_4b_omzet = fields.Many2one('account.account.tag')
tag_4b_btw = fields.Many2one('account.account.tag')
tag_5b_btw = fields.Many2one('account.account.tag')
@api.model
def default_get(self, fields_list):
defv = super().default_get(fields_list)
company_id = self.env.user.company_id.id
config = self.env['l10n.nl.vat.statement.config'].search([
('company_id', '=', company_id)], limit=1
)
if config:
defv.setdefault('tag_1a_omzet', config.tag_1a_omzet.id)
defv.setdefault('tag_1a_btw', config.tag_1a_btw.id)
defv.setdefault('tag_1b_omzet', config.tag_1b_omzet.id)
defv.setdefault('tag_1b_btw', config.tag_1b_btw.id)
defv.setdefault('tag_1c_omzet', config.tag_1c_omzet.id)
defv.setdefault('tag_1c_btw', config.tag_1c_btw.id)
defv.setdefault('tag_1d_omzet', config.tag_1d_omzet.id)
defv.setdefault('tag_1d_btw', config.tag_1d_btw.id)
defv.setdefault('tag_1e_omzet', config.tag_1e_omzet.id)
defv.setdefault('tag_2a_omzet', config.tag_2a_omzet.id)
defv.setdefault('tag_2a_btw', config.tag_2a_btw.id)
defv.setdefault('tag_3a_omzet', config.tag_3a_omzet.id)
defv.setdefault('tag_3b_omzet', config.tag_3b_omzet.id)
defv.setdefault('tag_3b_omzet_d', config.tag_3b_omzet_d.id)
defv.setdefault('tag_3c_omzet', config.tag_3c_omzet.id)
defv.setdefault('tag_4a_omzet', config.tag_4a_omzet.id)
defv.setdefault('tag_4a_btw', config.tag_4a_btw.id)
defv.setdefault('tag_4b_omzet', config.tag_4b_omzet.id)
defv.setdefault('tag_4b_btw', config.tag_4b_btw.id)
defv.setdefault('tag_5b_btw', config.tag_5b_btw.id)
return defv
if not self._is_l10n_nl_coa():
return defv
defv.setdefault('tag_1a_omzet', self.env.ref('l10n_nl.tag_nl_03').id)
defv.setdefault('tag_1a_btw', self.env.ref('l10n_nl.tag_nl_20').id)
defv.setdefault('tag_1b_omzet', self.env.ref('l10n_nl.tag_nl_05').id)
defv.setdefault('tag_1b_btw', self.env.ref('l10n_nl.tag_nl_22').id)
defv.setdefault('tag_1c_omzet', self.env.ref('l10n_nl.tag_nl_06').id)
defv.setdefault('tag_1c_btw', self.env.ref('l10n_nl.tag_nl_23').id)
defv.setdefault('tag_1d_omzet', self.env.ref('l10n_nl.tag_nl_07').id)
defv.setdefault('tag_1d_btw', self.env.ref('l10n_nl.tag_nl_24').id)
defv.setdefault('tag_1e_omzet', self.env.ref('l10n_nl.tag_nl_08').id)
defv.setdefault('tag_2a_omzet', self.env.ref('l10n_nl.tag_nl_10').id)
defv.setdefault('tag_2a_btw', self.env.ref('l10n_nl.tag_nl_27').id)
defv.setdefault('tag_3a_omzet', self.env.ref('l10n_nl.tag_nl_12').id)
defv.setdefault('tag_3b_omzet', self.env.ref('l10n_nl.tag_nl_40').id)
defv.setdefault('tag_3b_omzet_d', self.env.ref('l10n_nl.tag_nl_41').id)
defv.setdefault('tag_3c_omzet', self.env.ref('l10n_nl.tag_nl_14').id)
defv.setdefault('tag_4a_omzet', self.env.ref('l10n_nl.tag_nl_16').id)
defv.setdefault('tag_4a_btw', self.env.ref('l10n_nl.tag_nl_29').id)
defv.setdefault('tag_4b_omzet', self.env.ref('l10n_nl.tag_nl_17').id)
defv.setdefault('tag_4b_btw', self.env.ref('l10n_nl.tag_nl_30').id)
defv.setdefault('tag_5b_btw', self.env.ref('l10n_nl.tag_nl_33').id)
return defv
def _is_l10n_nl_coa(self):
l10n_nl_coa = self.env.ref('l10n_nl.l10nnl_chart_template', False)
company_coa = self.env.user.company_id.chart_template_id
return company_coa == l10n_nl_coa
def execute(self):
self.ensure_one()
company_id = self.env.user.company_id.id
config = self.env['l10n.nl.vat.statement.config'].search([
('company_id', '=', company_id)], limit=1
)
if not config:
config = self.env['l10n.nl.vat.statement.config'].create({
'company_id': company_id
})
config.write({
'company_id': company_id,
'tag_1a_omzet': self.tag_1a_omzet.id,
'tag_1a_btw': self.tag_1a_btw.id,
'tag_1b_omzet': self.tag_1b_omzet.id,
'tag_1b_btw': self.tag_1b_btw.id,
'tag_1c_omzet': self.tag_1c_omzet.id,
'tag_1c_btw': self.tag_1c_btw.id,
'tag_1d_omzet': self.tag_1d_omzet.id,
'tag_1d_btw': self.tag_1d_btw.id,
'tag_1e_omzet': self.tag_1e_omzet.id,
'tag_2a_omzet': self.tag_2a_omzet.id,
'tag_2a_btw': self.tag_2a_btw.id,
'tag_3a_omzet': self.tag_3a_omzet.id,
'tag_3b_omzet': self.tag_3b_omzet.id,
'tag_3b_omzet_d': self.tag_3b_omzet_d.id,
'tag_3c_omzet': self.tag_3c_omzet.id,
'tag_4a_omzet': self.tag_4a_omzet.id,
'tag_4a_btw': self.tag_4a_btw.id,
'tag_4b_omzet': self.tag_4b_omzet.id,
'tag_4b_btw': self.tag_4b_btw.id,
'tag_5b_btw': self.tag_5b_btw.id,
})
action_name = 'l10n_nl_tax_statement.action_account_vat_statement_nl'
action = self.env.ref(action_name).read()[0]
return action
|
PypiClean
|
/monk_gluon_cuda90-0.0.1.tar.gz/monk_gluon_cuda90-0.0.1/monk/gluon/finetune/level_13_updates_main.py
|
from monk.gluon.finetune.imports import *
from monk.system.imports import *
from monk.gluon.finetune.level_12_losses_main import prototype_losses
class prototype_updates(prototype_losses):
'''
Main class for all parametric update functions
Args:
verbose (int): Set verbosity levels
0 - Print Nothing
1 - Print desired details
'''
@accepts("self", verbose=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def __init__(self, verbose=1):
super().__init__(verbose=verbose);
##########################################################################################################################################################
@warning_checks(None, ["gte", 32, "lte", 1024], post_trace=False)
@error_checks(None, ["gt", 0], post_trace=False)
@accepts("self", int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_input_size(self, input_size):
'''
Update input size.
Args:
input_size (int): New input size
Returns:
None
'''
self.system_dict = set_input_size(input_size, self.system_dict);
self.custom_print("Update: Input size - {}".format(self.system_dict["dataset"]["params"]["input_size"]));
self.custom_print("");
@warning_checks(None, ["lte", 128], post_trace=False)
@error_checks(None, ["gt", 0], post_trace=False)
@accepts("self", int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_batch_size(self, batch_size):
'''
Update batch size.
Args:
batch_size (int): New batch size
Returns:
None
'''
self.system_dict = set_batch_size(batch_size, self.system_dict);
self.custom_print("Update: Batch size - {}".format(self.system_dict["dataset"]["params"]["batch_size"]));
self.custom_print("");
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_shuffle_data(self, shuffle):
'''
Update to shuffle data or not.
Args:
shuffle (bool): If True, will shuffle data
Returns:
None
'''
self.system_dict = set_data_shuffle(shuffle, self.system_dict);
self.custom_print("Update: Data shuffle - {}".format(self.system_dict["dataset"]["params"]["train_shuffle"]));
self.custom_print("");
@warning_checks(None, ["lte", psutil.cpu_count()], post_trace=False)
@error_checks(None, ["gt", 0], post_trace=False)
@accepts("self", int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_num_processors(self, num_processors):
'''
Update num processors for data loader.
Args:
num_processors (int): Max CPUs for data sampling
Returns:
None
'''
self.system_dict = set_num_processors(num_processors, self.system_dict);
self.custom_print("Update: Num processors - {}".format(self.system_dict["dataset"]["params"]["num_workers"]));
self.custom_print("");
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_weighted_sampling(self, sample):
'''
Function inactive
'''
self.system_dict = set_weighted_sampling(sample, self.system_dict);
self.custom_print("Update: Weighted Sampling - {}".format(self.system_dict["dataset"]["params"]["weighted_sample"]));
self.custom_print("");
@warning_checks(None, ["gt", 0.5, "lt", 1], post_trace=False)
@error_checks(None, ["gt", 0, "lt", 1], post_trace=False)
@accepts("self", float, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_trainval_split(self, value):
'''
Update training-validation split
Args:
value (float): Indicating train validation split
Division happens as follows:
train - total dataset * split * 100
val - total dataset * (1-split) * 100
Returns:
None
'''
if(self.system_dict["dataset"]["dataset_type"] == "train"):
dataset_path = self.system_dict["dataset"]["train_path"];
path_to_csv=False;
elif(self.system_dict["dataset"]["dataset_type"] == "train-val"):
dataset_path = [self.system_dict["dataset"]["train_path"], self.system_dict["dataset"]["val_path"]];
path_to_csv=False;
elif(self.system_dict["dataset"]["dataset_type"] == "csv_train"):
dataset_path = self.system_dict["dataset"]["train_path"];
path_to_csv = self.system_dict["dataset"]["csv_train"];
elif(self.system_dict["dataset"]["dataset_type"] == "csv_train-val"):
dataset_path = [self.system_dict["dataset"]["train_path"], self.system_dict["dataset"]["val_path"]];
path_to_csv = [self.system_dict["dataset"]["csv_train"], self.system_dict["dataset"]["csv_val"]];
else:
msg = "Dataset Type invalid.\n";
msg += "Cannot update split"
ConstraintsWarning(msg)
self.system_dict = set_dataset_train_path(self.system_dict, dataset_path, value, path_to_csv, self.system_dict["dataset"]["params"]["delimiter"]);
@warning_checks(None, dataset_path=None, split=["gt", 0.5, "lt", 1], path_to_csv=None, delimiter=None, post_trace=False)
@error_checks(None, dataset_path=["folder", 'r'], split=["gt", 0, "lt", 1], path_to_csv=["file", 'r'], delimiter=["in", [",", ";", "-", " "]], post_trace=False)
@accepts("self", dataset_path=[str, list], split=float, path_to_csv=[str, list, bool], delimiter=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_dataset(self, dataset_path=False, split=0.9, path_to_csv=False, delimiter=","):
'''
Update dataset path
Args:
dataset_path (str, list): Path to Dataset folder
1) Single string if validation data does not exist
2) List [train_path, val_path] in case of separate train and val data
path_to_csv (str, list): Path to csv file pointing towards images
1) Single string if validation data does not exist
2) List [train_path, val_path] in case of separate train and val data
value (float): Indicating train validation split
Division happens as follows:
train - total dataset * split * 100
val - total dataset * (1-split) * 100
delimiter (str): Delimiter for csv file
Returns:
None
'''
self.system_dict = set_dataset_train_path(self.system_dict, dataset_path, split, path_to_csv, delimiter);
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", str, force=bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_model_name(self, model_name, force=False):
'''
Update model name
Args:
model_name (str): Select from available models. Check via List_Models() function
force (bool): Dummy function
Returns:
None
'''
if(not force):
if(self.system_dict["training"]["status"]):
ConstraintWarning("Model trained using {}\n".format(self.system_dict["model"]["params"]["model_name"]));
ConstraintWarning("Changing the model will overwrite previously trained models if training is executed.\n");
inp = input("Do you wish to continue further (y/n):");
if(inp == "y"):
self.system_dict = set_model_name(model_name, self.system_dict);
self.custom_print("Update: Model name - {}".format(self.system_dict["model"]["params"]["model_name"]));
self.custom_print("");
else:
self.custom_print("Model not updated.");
self.custom_print("");
else:
self.system_dict = set_model_name(model_name, self.system_dict);
self.custom_print("Update: Model name - {}".format(self.system_dict["model"]["params"]["model_name"]));
self.custom_print("");
else:
self.system_dict = set_model_name(model_name, self.system_dict);
self.custom_print("Update: Model name - {}".format(self.system_dict["model"]["params"]["model_name"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", [str, list], force=bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_model_path(self, model_path, force=False):
'''
Update model path for inferencing
Args:
model_path (str): Path to model weights.
force (bool): Dummy function
Returns:
None
'''
if(not force):
if(self.system_dict["training"]["status"]):
ConstraintWarning("Model trained using {}\n".format(self.system_dict["model"]["params"]["model_name"]));
ConstraintWarning("Changing the model will overwrite previously trained models if training is executed.\n");
inp = input("Do you wish to continue further (y/n):");
if(inp == "y"):
self.system_dict = set_model_path(model_path, self.system_dict);
self.custom_print("Update: Model path - {}".format(self.system_dict["model"]["params"]["model_path"]));
self.custom_print("");
else:
self.custom_print("Model not updated.");
self.custom_print("");
else:
self.system_dict = set_model_path(model_path, self.system_dict);
self.custom_print("Update: Model path - {}".format(self.system_dict["model"]["params"]["model_path"]));
self.custom_print("");
else:
self.system_dict = set_model_path(model_path, self.system_dict);
self.custom_print("Update: Model path - {}".format(self.system_dict["model"]["params"]["model_path"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_use_gpu(self, gpu):
'''
Update to use gpu or cpu
Args:
gpu (bool): If True, then use GPU
Returns:
None
'''
self.system_dict = set_device(gpu, self.system_dict);
self.custom_print("Update: Use Gpu - {}".format(self.system_dict["model"]["params"]["use_gpu"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_use_pretrained(self, pretrained):
'''
Update to use pretrained wights or randomly initialized weights
Args:
pretrained (bool): If True, use pretrained weights
else, use randomly initialized weights
Returns:
None
'''
self.system_dict = set_pretrained(pretrained, self.system_dict);
self.custom_print("Update: Use pretrained - {}".format(self.system_dict["model"]["params"]["use_pretrained"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_freeze_base_network(self, freeze):
'''
Update whether freeze base network or not
Args:
freeze (bool): If True, then base network is non-trainable, works as a feature extractor
Returns:
None
'''
self.system_dict = set_freeze_base_network(freeze, self.system_dict);
self.custom_print("Update: Freeze Base Network - {}".format(self.system_dict["model"]["params"]["freeze_base_network"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@error_checks(None, ["gte", 0], post_trace=False)
@accepts("self", int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_freeze_layers(self, num_freeze):
'''
Update to freeze certain layers in the network
Args:
num_freeze (int): Number of layers to freeze in network starting from top
Returns:
None
'''
self.system_dict["model"]["params"]["num_freeze"] = num_freeze;
self.custom_print("Update: Freeze layers - {}".format(self.system_dict["model"]["params"]["num_freeze"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@warning_checks(None, ["lt", 100], post_trace=False)
@error_checks(None, ["gt", 0], post_trace=False)
@accepts("self", int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_num_epochs(self, num_epochs):
'''
Update number of epochs to train the network
Args:
num_epochs (int): New number of epochs
Returns:
None
'''
self.system_dict = set_num_epochs(num_epochs, self.system_dict);
self.custom_print("Update: Num Epochs - {}".format(self.system_dict["hyper-parameters"]["num_epochs"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@warning_checks(None, ["lt", 1], post_trace=False)
@error_checks(None, ["gt", 0], post_trace=False)
@accepts("self", [int, float], post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_learning_rate(self, learning_rate):
'''
Update base learning rate for training
Args:
learning_rate (float): New base learning rate
Returns:
None
'''
self.system_dict["hyper-parameters"]["learning_rate"] = learning_rate;
self.system_dict["hyper-parameters"]["optimizer"]["params"]["lr"] = learning_rate;
self.custom_print("Update: Learning Rate - {}".format(self.system_dict["hyper-parameters"]["learning_rate"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_display_progress_realtime(self, value):
'''
Update display progress param
Args:
value (bool): If True, then real time progress is displayed
Returns:
None
'''
self.system_dict = set_display_progress_realtime(value, self.system_dict);
self.custom_print("Update: Display progress realtime - {}".format(self.system_dict["training"]["settings"]["display_progress_realtime"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_display_progress(self, value):
'''
Update display progress param
Args:
value (bool): If True, then per epoch progress is displayed
Returns:
None
'''
self.system_dict = set_display_progress(value, self.system_dict);
self.custom_print("Update: Display progress - {}".format(self.system_dict["training"]["settings"]["display_progress"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@error_checks(None, None, prefix=["name", ["A-Z", "a-z", "0-9", "-", "_"]], post_trace=False)
@accepts("self", bool, prefix=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_save_intermediate_models(self, value, prefix="intermediate_model_"):
'''
Update whether to save intermediate models or not
Args:
value (bool): If True, saves model weight post every epoch
prefix (str): Appends a prefix to intermediate weights
Returns:
None
'''
if(value):
if(not os.access(self.system_dict["model_dir"], os.W_OK)):
msg = "Folder \"{}\" has no read access".format(self.system_dict["model_dir"])
msg += "Cannot save Intermediate models";
raise ConstraintError(msg);
self.system_dict = set_save_intermediate_models(value, self.system_dict);
self.system_dict = set_intermediate_model_prefix(prefix, self.system_dict);
self.custom_print("Update: Save Intermediate models - {}".format(self.system_dict["training"]["settings"]["save_intermediate_models"]));
if(self.system_dict["training"]["settings"]["save_intermediate_models"]):
self.custom_print("Update: Intermediate model prefix - {}".format(self.system_dict["training"]["settings"]["intermediate_model_prefix"]));
self.custom_print("");
##########################################################################################################################################################
##########################################################################################################################################################
@accepts("self", bool, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def update_save_training_logs(self, value):
'''
Update whether to save training logs or not
Args:
value (bool): If True, saves all training and validation metrics. Required for comparison.
Returns:
None
'''
self.system_dict = set_save_training_logs(value, self.system_dict);
self.custom_print("Update: Save Training logs - {}".format(self.system_dict["training"]["settings"]["save_training_logs"]));
self.custom_print("");
##########################################################################################################################################################
|
PypiClean
|
/bta-0.6.tar.gz/bta-0.6/libesedb/libesedb.py
|
# This file is part of the BTA toolset
# (c) Airbus Group CERT, Airbus Group Innovations and Airbus DS CyberSecurity
from ctypes import cdll, c_void_p, c_int, pointer, byref, create_string_buffer, string_at
from esetypes import ColumnType,ValueFlags,native_type,multi_native_type
from sys import platform
import logging
log = logging.getLogger("libesedb")
class ESEDB_Exception(Exception):
pass
class ESEDB_Error(ESEDB_Exception):
pass
class LibESEDB(object):
# keep references to those functions that are called in destructors
byref = byref
c_void_p = c_void_p
def __init__(self, ignore_errors=False, report_error=lambda x:None):
self.ignore_errors = ignore_errors
self.report_error = report_error
try:
if platform.startswith("linux"):
self.lib = cdll.LoadLibrary("libesedb.so")
elif platform.startswith("win32"):
self.lib = cdll.LoadLibrary("libesedb.dll")
elif platform.startswith("darwin"):
self.lib = cdll.LoadLibrary("libesedb.dylib")
except OSError,e:
if e.args[0].endswith("cannot open shared object file: No such file or directory"):
raise ESEDB_Error(
"%s. Did you install it or did you use LD_LIBRARY_PATH correctly ?"
% e.message)
raise
self.error = c_void_p()
self.error_p = pointer(self.error)
def _func(self, funcname):
funcname = "libesedb_"+funcname
func = getattr(self.lib, funcname)
def _call(*args):
args += (self.error_p,)
if func(*args) != 1:
errmsg = "%s: %s" % (funcname, self.get_error(self.error))
if self.ignore_errors:
errmsg = "IGNORED: %s" % errmsg
log.warning(errmsg)
self.report_error(errmsg)
return
raise ESEDB_Exception(errmsg)
return _call
def get_error(self, error):
sz = 2048
msgbuf = create_string_buffer(sz)
if self.lib.liberror_error_sprint(error, byref(msgbuf), sz) == -1:
raise ESEDB_Exception("liberror_error_sprint: unkown error!")
return msgbuf.value
def open(self, fname, flags=1):
f = c_void_p()
self._func("file_initialize")(byref(f))
self._func("file_open")(f, fname, flags)
return f
def file_get_number_of_tables(self, f):
nb = c_int()
self._func("file_get_number_of_tables")(f, byref(nb))
return nb.value
def file_get_table(self, f, table_num):
table = c_void_p()
self._func("file_get_table")(f, table_num, byref(table))
return table
def table_get_utf8_name(self, table):
sz = c_int()
self._func("table_get_utf8_name_size")(table, byref(sz))
name = create_string_buffer(sz.value)
self._func("table_get_utf8_name")(table, byref(name), sz)
return name.value.decode("utf8")
def table_get_number_of_columns(self, table, flags=0):
nb = c_int()
self._func("table_get_number_of_columns")(table, byref(nb), flags)
return nb.value
def table_get_column(self, table, col_num, flags=0):
column = c_void_p()
self._func("table_get_column")(table, col_num, byref(column), flags)
return column
def table_free(self, table):
self._func("table_free")(self.byref(table))
def table_get_number_of_records(self, table):
nb = c_int()
self._func("table_get_number_of_records")(table, byref(nb))
return nb.value
def table_get_record(self, table, record_num):
record = c_void_p()
self._func("table_get_record")(table, record_num, byref(record))
return record
def column_get_utf8_name(self, column):
sz = c_int()
self._func("column_get_utf8_name_size")(column, byref(sz))
name = create_string_buffer(sz.value)
self._func("column_get_utf8_name")(column, byref(name), sz)
return name.value.decode("utf8")
def column_get_type(self, column):
typ = c_int()
self._func("column_get_type")(column, byref(typ))
return typ.value
def column_free(self, column):
self._func("column_free")(self.byref(column))
def record_get_number_of_values(self, record):
sz = c_int()
self._func("record_get_number_of_values")(record, byref(sz))
return sz.value
def record_get_column_identifier(self, record, value_num):
ident = c_int()
self._func("record_get_column_identifier")(record, value_num, byref(ident))
return ident.value
def record_get_column_type(self, record, value_num):
typ = c_int()
self._func("record_get_column_type")(record, value_num, byref(typ))
return typ.value
def record_get_value(self, record, value_num):
flags = c_int()
datalen = c_int()
data=c_void_p()
self._func("record_get_value")(record, value_num, byref(data), byref(datalen), byref(flags))
return string_at(data, datalen.value), flags.value
def record_get_long_value(self, record, value_num):
long_value = c_void_p()
self._func("record_get_long_value")(record, value_num, byref(long_value))
return long_value
def record_free(self, record):
self._func("record_free")(self.byref(record))
def long_value_get_number_of_segments(self, long_value):
sz = c_int()
self._func("long_value_get_number_of_segments")(long_value, byref(sz))
return sz.value
def long_value_get_segment_data(self, long_value, segment_num):
datalen = c_int()
data=c_void_p()
self._func("long_value_get_segment_data")(long_value, segment_num, byref(data), byref(datalen))
return string_at(data, datalen.value)
class ESEDB(object):
def __init__(self, fname, ignore_errors=False, report_error=None):
self.lib = LibESEDB(ignore_errors=ignore_errors, report_error=report_error)
self.file = self.lib.open(fname)
self.tables = [ESETable(self, i) for i in range(self.lib.file_get_number_of_tables(self.file))]
self.name2table = {t.name:t for t in self.tables}
def __getitem__(self, i):
try:
return self.tables[i]
except TypeError:
return self.name2table[i]
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise AttributeError(attr)
def __iter__(self):
return iter(self.tables)
def __repr__(self):
return "<ESEDB: %s>" % " ".join(t.name for t in self.tables)
class ESETable(object):
def __init__(self, db, table_num):
self.db = db
self.lib = db.lib
self.table_num = table_num
self.table = self.lib.file_get_table(self.db.file, table_num)
self.name = self.lib.table_get_utf8_name(self.table)
self.columns = [ESEColumn(self, i) for i in range(self.lib.table_get_number_of_columns(self.table))]
self.name2column = {c.name:c for c in self.columns}
self._number_of_records = None # expensive to get, so we wait for it to be actually needed
@property
def number_of_records(self):
if self._number_of_records is None:
self._number_of_records = self.lib.table_get_number_of_records(self.table)
return self._number_of_records
def __del__(self):
if hasattr(self, "table"):
self.lib.table_free(self.table)
def __getitem__(self, i):
try:
return self.columns[i]
except TypeError:
return self.name2column[i]
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise AttributeError(attr)
def __iter__(self):
return iter(self.columns)
def iter_records(self, entries=None, columns=None):
if entries is None:
if columns is not None:
entries = [c.column_num for c in columns]
return (ESERecord(self, i, limit=entries) for i in xrange(self.number_of_records))
class ESEColumn(object):
def __init__(self, table, column_num):
self.table = table
self.lib = table.lib
self.column_num = column_num
self.column = self.lib.table_get_column(self.table.table, column_num)
try:
self.name = self.lib.column_get_utf8_name(self.column)
self.type = self.lib.column_get_type(self.column)
finally:
self.lib.column_free(self.column)
self.column = None
class ESERecord(object):
def __init__(self, table, record_num, limit=None):
self.table = table
self.lib = table.lib
self.record_num = record_num
self.record = self.lib.table_get_record(self.table.table, record_num)
try:
self.value_entries = limit if limit is not None else range(self.lib.record_get_number_of_values(self.record))
self.values=list()
for i in self.value_entries:
try:
self.values.append(ESEValue(self, i))
except:
a=ESEValue(self,i)
a.value=u"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
self.values.append(a)
log.warning("====> %r" % (ESEValue(self,i)))
finally:
self.lib.record_free(self.record)
self.record = None
def __iter__(self):
return iter(self.values)
class ESEValue(object):
__slots__ = ["record", "lib", "num", "type", "flags", "value"]
def __init__(self, record, value_num):
self.record = record
self.lib = record.lib
self.num = value_num
self.type = self.record.table.columns[value_num].type
value,self.flags = self.lib.record_get_value(self.record.record, value_num)
if not value:
self.value = None
else:
if self.flags & ValueFlags.LONG_VALUE:
try:
lv = self.lib.record_get_long_value(self.record.record, value_num)
except ESEDB_Exception,e:
log.warning("error %s on line %d column %d flag %08x" % (e,self.record.record_num,value_num, self.flags))
value = None
raise
else:
segnb = self.lib.long_value_get_number_of_segments(lv)
segs = [self.lib.long_value_get_segment_data(lv, i) for i in xrange(segnb)]
value = "".join(segs)
if self.flags & ValueFlags.MULTI_VALUE:
self.value = multi_native_type(self.flags, self.type, value)
else:
self.value = native_type(self.type, value)
# Removed for perf reasons and because nobody needs these values yet
#
# self.id =self.lib.record_get_column_identifier(self.record.record, value_num)
# self.hexvalue = self.value.encode("hex")
# self.texttype = ColumnType[self.type]
# self.textflags = ValueFlags.flag(self.flags)
@property
def strvalue(self):
if self.value is None:
return ""
if self.type in [ColumnType.BINARY_DATA,
ColumnType.LARGE_BINARY_DATA,
ColumnType.SUPER_LARGE_VALUE]:
return self.value.encode("hex")
return str(self.value)
def __repr__(self):
return "<val:type=%s:flags=%s:value=%s>" % (ColumnType[self.type], ValueFlags.flag(self.flags), self.strvalue )
def test():
import sys
l = LibESEDB()
f = l.open(sys.argv[1])
nbt = l.file_get_number_of_tables(f)
for i in range(nbt):
table = l.file_get_table(f, i)
print "%2i %s #records=%i" % (i,l.table_get_utf8_name(table), l.table_get_number_of_records(table))
for j in range(l.table_get_number_of_columns(table)):
col = l.table_get_column(table, j)
print " %5i:%s" % (j, l.column_get_utf8_name(col))
l.column_free(col)
l.table_free(table)
def test2():
import sys
db = ESEDB(sys.argv[1])
for r in db.sd_table.iter_records():
print
for v in r:
print v
def test3():
import sys
db = ESEDB(sys.argv[1])
sys.stdout.write("\t".join(c.name for c in db.sd_table) + "\n")
i = 0
for r in db.datatable.iter_records():
sys.stdout.write("\t".join(v.strvalue for v in r) + "\n")
i+=1
if i > 500:
break
if __name__ == "__main__":
test3()
|
PypiClean
|
/bqplot_gl-0.1.0a0.tar.gz/bqplot_gl-0.1.0a0/src/LinesGLView.ts
|
import { isLinearScale } from 'bqscales';
import { Lines } from 'bqplot';
import * as THREE from 'three';
// @ts-ignore This should not be needed if we upgrade to newer versions of ThreeJS
window.THREE = THREE;
import 'three/examples/js/lines/LineSegments2';
import 'three/examples/js/lines/Line2';
import 'three/examples/js/lines/LineMaterial';
import 'three/examples/js/lines/LineSegmentsGeometry';
import 'three/examples/js/lines/LineGeometry';
import { LinesGLModel } from './LinesGLModel';
import { Values } from './values';
import { ScaleType, initializeBqplotFigure } from './utils';
export class LinesGLView extends Lines {
async render() {
await super.render();
initializeBqplotFigure(this.parent);
// Create material for markers
this.material = new THREE.LineMaterial();
this.material.uniforms.domain_x = { type: '2f', value: [0, 1] };
this.material.uniforms.domain_y = { type: '2f', value: [0, 1] };
this.material.uniforms.range_x = { type: '2f', value: [0, 1] };
this.material.uniforms.range_y = { type: '2f', value: [0, 1] };
this.material.uniforms.diffuse = { type: '3f', value: [1, 0, 0] };
this.material.uniforms.opacity = { type: 'f', value: 1.0 };
this.material.defines.USE_SCALE_X = true;
this.material.defines.USE_SCALE_Y = true;
this.material.defines.SCALE_TYPE_X = ScaleType.SCALE_TYPE_LINEAR;
this.material.defines.SCALE_TYPE_Y = ScaleType.SCALE_TYPE_LINEAR;
this.updateMaterialScales();
this.material.onBeforeCompile = this.beforeCompile;
this.update_stroke_width();
this.update_style();
this.geometry = new THREE.LineGeometry();
this.updateGeometry();
this.line = new THREE.Line2(this.geometry, this.material);
this.line.frustumCulled = false;
this.scene = new THREE.Scene();
this.scene.add(this.line);
this.listenTo(this.model, 'change:x change:y', this.updateGeometry);
this.listenTo(this.model, 'change:stroke_width', this.update_stroke_width);
this.parent.extras.webGLMarks.push(this);
this.parent.extras.webGLRequestRender();
}
beforeCompile(shader) {
// we include the scales header, and a snippet that uses the scales
shader.vertexShader =
'// added by bqplot-image-gl\n#include <scales>\n // added by bqplot-image-gl\n' +
shader.vertexShader;
const transform = `
vec3 instanceStart_transformed = instanceStart;
vec3 instanceEnd_transformed = instanceEnd;
instanceStart_transformed.x = SCALE_X(instanceStart_transformed.x);
instanceStart_transformed.y = SCALE_Y(instanceStart_transformed.y);
instanceEnd_transformed.x = SCALE_X(instanceEnd_transformed.x);
instanceEnd_transformed.y = SCALE_Y(instanceEnd_transformed.y);
vec4 start = modelViewMatrix * vec4( instanceStart_transformed, 1.0 );
vec4 end = modelViewMatrix * vec4( instanceEnd_transformed, 1.0 );
`;
// we modify the shader to replace a piece
const begin = 'vec4 start = modelViewMatrix * vec4( instanceStart, 1.0 );';
const offset_begin = shader.vertexShader.indexOf(begin);
if (offset_begin == -1) {
console.error('Could not find magic begin line in shader');
}
const end = 'vec4 end = modelViewMatrix * vec4( instanceEnd, 1.0 );';
const offset_end = shader.vertexShader.indexOf(end);
if (offset_end == -1) {
console.error('Could not find magic end line in shader');
}
shader.vertexShader =
shader.vertexShader.slice(0, offset_begin) +
transform +
shader.vertexShader.slice(offset_end + end.length);
}
updateGeometry() {
const scalar_names = ['x', 'y', 'z'];
const vector4_names = [];
const get_value = (name, index, default_value) => {
if (name === 'z') {
return 0;
}
return this.model.get(name);
};
const sequence_index = 0; // not used (see ipyvolume)
const current = new Values(
scalar_names,
[],
get_value,
sequence_index,
vector4_names
);
current.ensure_array('z');
current.merge_to_vec3(['x', 'y', 'z'], 'position');
// important to reset this, otherwise we may use an old buffered value
// Note that if we upgrade threejs, this may be named differently https://github.com/mrdoob/three.js/issues/18990
this.geometry.maxInstancedCount = undefined;
this.geometry.setDrawRange(
0,
Math.min(this.model.get('x').length, this.model.get('y').length)
);
this.geometry.setPositions(current.array_vec3['position']);
this.parent.extras.webGLRequestRender();
}
update_line_xy(animate: boolean) {
this.parent.extras.webGLRequestRender();
}
update_style() {
const color = new THREE.Color(this.model.get('colors')[0]);
this.material.color = color.toArray();
const opacities = this.model.get('opacities');
if (opacities && opacities.length) {
this.material.uniforms.opacity.value = opacities[0];
} else {
this.material.uniforms.opacity.value = 1;
}
this.parent.extras.webGLRequestRender();
}
update_stroke_width() {
this.material.linewidth = this.model.get('stroke_width');
this.parent.extras.webGLRequestRender();
}
updateMaterialScales() {
const x_scale = this.scales.x ? this.scales.x : this.parent.scale_x;
const y_scale = this.scales.y ? this.scales.y : this.parent.scale_y;
this.material.defines.SCALE_TYPE_X = isLinearScale(x_scale)
? ScaleType.SCALE_TYPE_LINEAR
: ScaleType.SCALE_TYPE_LOG;
this.material.defines.SCALE_TYPE_Y = isLinearScale(y_scale)
? ScaleType.SCALE_TYPE_LINEAR
: ScaleType.SCALE_TYPE_LOG;
this.material.needsUpdate = true;
}
renderGL() {
const fig = this.parent;
const x_scale = this.scales.x ? this.scales.x : this.parent.scale_x;
const y_scale = this.scales.y ? this.scales.y : this.parent.scale_y;
const range_x = this.parent.padded_range('x', x_scale.model);
const range_y = this.parent.padded_range('y', y_scale.model);
this.material.uniforms['domain_x'].value = x_scale.scale.domain();
this.material.uniforms['domain_y'].value = y_scale.scale.domain();
this.material.uniforms['range_x'].value = range_x;
this.material.uniforms['range_y'].value = [range_y[1], range_y[0]];
this.material.uniforms['resolution'].value = [
fig.plotareaWidth,
fig.plotareaHeight,
];
this.updateMaterialScales();
const { renderer, camera } = fig.extras.webGLRenderer;
renderer.render(this.scene, camera);
}
draw(animate) {}
line: THREE.Line2;
material: THREE.LineMaterial;
geometry: THREE.LineGeometry;
scene: THREE.Scene;
model: LinesGLModel;
}
|
PypiClean
|
/ripiu.cmsplugin_articles-0.4.2-py3-none-any.whl/ripiu/cmsplugin_articles/models.py
|
from cms.models import CMSPlugin
from cms.models.fields import PageField
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from djangocms_attributes_field.fields import AttributesField
from modelmixins import ModelMixin
from .conf import settings as conf # NOQA
LEFT = 'left'
RIGHT = 'right'
CENTER = 'center'
ALIGN_CHOICES = (
(LEFT, _('Left')),
(RIGHT, _('Right')),
(CENTER, _('Center')),
)
def get_templates():
choices = [
('default', _('Default')),
]
choices += settings.RIPIU_ARTICLES_TEMPLATES
return choices
class TemplateAttributesMixin(ModelMixin):
template = models.CharField(
_('Template'),
choices=get_templates(),
default=get_templates()[0][0],
max_length=255,
)
attributes = AttributesField(
verbose_name=_('Attributes'),
blank=True,
)
class HeadedPluginModel(CMSPlugin):
H1 = 1
H2 = 2
H3 = 3
H4 = 4
H5 = 5
H6 = 6
HEADING_LEVELS = (
(H1, 'H1'),
(H2, 'H2'),
(H3, 'H3'),
(H4, 'H4'),
(H5, 'H5'),
(H6, 'H6'),
)
title = models.CharField(
_('Title'), max_length=400, default='', blank=True
)
heading_level = models.PositiveSmallIntegerField(
_('Heading level'),
choices=HEADING_LEVELS,
default=H2,
help_text=_('Choose a heading level'),
)
subtitle = models.CharField(
_('Subtitle'), max_length=400, default='', blank=True,
)
header_alignment = models.CharField(
_('Header alignment'),
max_length=10, blank=True,
choices=ALIGN_CHOICES
)
def __str__(self):
return self.title or ''
class Meta:
abstract = True
class ArticlePluginModel(TemplateAttributesMixin, HeadedPluginModel):
"""
An article
"""
full_article = PageField(
on_delete=models.SET_NULL,
blank=True, null=True,
verbose_name=_('Full article page'),
help_text=_('You may specify a page with a full article'),
)
class Meta:
verbose_name = _('Article')
verbose_name_plural = _('Articles')
class SectionPluginModel(TemplateAttributesMixin, HeadedPluginModel):
"""
A section
"""
class Meta:
verbose_name = _('Section')
verbose_name_plural = _('Sections')
|
PypiClean
|
/django-audiotracks-0.2.4.tar.gz/django-audiotracks-0.2.4/audiotracks/views.py
|
import os
from django.utils.translation import ugettext
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import RequestSite
from django.conf import settings
from django.core.files.uploadhandler import TemporaryFileUploadHandler
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core import urlresolvers
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView
from django.contrib import messages
from django.contrib.auth.models import User
try:
import mutagen
except ImportError:
import mutagenx as mutagen # Py3
from audiotracks.models import get_track_model
from audiotracks.forms import TrackUploadForm, TrackEditForm
METADATA_FIELDS = ('title', 'artist', 'genre', 'description', 'date')
def paginate(tracks, page_number):
per_page = getattr(settings, 'AUDIOTRACKS_PER_PAGE', 10)
paginator = Paginator(tracks, per_page)
if page_number is None:
page = paginator.page(1)
else:
try:
page = paginator.page(int(page_number))
except PageNotAnInteger:
# If page is not an integer, deliver first page.
page = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of
# results.
page = paginator.page(paginator.num_pages)
return page, page.object_list
def index(request, username=None, page_number=None):
tracks = get_track_model().objects
if username:
tracks = tracks.filter(user__username=username)
tracks = tracks.order_by('-created_at').all()
page, tracks = paginate(tracks, page_number)
base_path = urlresolvers.reverse(
'audiotracks',
args=[username] if username is not None else [])
return render_to_response("audiotracks/latest.html", {
'username': username, 'tracks': tracks, 'page': page,
'base_path': base_path,
}, context_instance=RequestContext(request))
def user_index(request, username, page_number=None):
user = get_object_or_404(User, username=username)
tracks = user.tracks.order_by('-created_at').all()
page, tracks = paginate(tracks, page_number)
base_path = urlresolvers.reverse('user_index', args=[username])
return render_to_response("audiotracks/user_index.html", {
'username': username, 'tracks': tracks, 'page': page,
'base_path': base_path,
}, context_instance=RequestContext(request))
def track_detail(request, track_slug, username=None):
params = {'slug': track_slug}
params['user__username'] = username
track = get_object_or_404(get_track_model(), **params)
return render_to_response("audiotracks/detail.html",
{'username': username, 'track': track},
context_instance=RequestContext(request))
def set_temporary_file_upload_handler(request):
# Disable in memory upload before accessing POST
# because we need a file from which to read metadata
request.upload_handlers = [TemporaryFileUploadHandler()]
@login_required
@csrf_exempt # request.POST is accessed by CsrfViewMiddleware
def upload_track(request):
set_temporary_file_upload_handler(request)
if request.method == "POST":
form = TrackUploadForm(request.POST, request.FILES)
if form.is_valid():
audio_file = request.FILES['audio_file']
audio_file_path = audio_file.temporary_file_path()
metadata = mutagen.File(audio_file_path, easy=True)
track = form.save(commit=False)
track.user = request.user
for field in METADATA_FIELDS:
if metadata and metadata.get(field):
setattr(track, field, metadata.get(field)[0])
track.save()
return HttpResponseRedirect(urlresolvers.reverse('edit_track',
args=[track.id]))
else:
form = TrackUploadForm()
return render_to_response("audiotracks/new.html", {'form': form},
context_instance=RequestContext(request))
def update_audiofile_metadata(track):
filepath = track.audio_file.path
metadata = mutagen.File(filepath, easy=True)
if metadata:
for field in METADATA_FIELDS:
try:
metadata[field] = getattr(track, field)
except mutagen.easyid3.EasyID3KeyError:
pass
metadata.save()
@login_required
def edit_track(request, track_id):
username = request.user.username
track = request.user.tracks.get(id=track_id)
if request.method == "POST":
form = TrackEditForm(request.POST, request.FILES, instance=track)
if form.is_valid():
track = form.save()
update_audiofile_metadata(track)
if 'delete_image' in request.POST:
track.image = None
track.save()
messages.add_message(request, messages.INFO,
ugettext('Your changes have been saved.'))
redirect_url = urlresolvers.reverse('user_index', args=[username])
return HttpResponseRedirect(redirect_url)
else:
form = TrackEditForm(instance=track, )
track_url_args = ['']
track_url_args.insert(0, username)
track_detail_url = urlresolvers.reverse('track_detail',
args=track_url_args)
track_url_prefix = request.build_absolute_uri(track_detail_url)
track_filename = os.path.basename(track.audio_file.name)
return render_to_response("audiotracks/edit.html", {
'form': form,
'track': track,
'track_url_prefix': track_url_prefix,
'track_filename': track_filename,
}, context_instance=RequestContext(request))
@login_required
def confirm_delete_track(request, track_id):
track = get_object_or_404(request.user.tracks, id=track_id)
default_origin_url = urlresolvers.reverse('user_index',
args=[request.user.username])
return render_to_response("audiotracks/confirm_delete.html", {
'track': track,
'came_from': request.GET.get('came_from', default_origin_url)
}, context_instance=RequestContext(request))
@login_required
def delete_track(request):
track_id = request.POST.get('track_id')
track = get_object_or_404(request.user.tracks, id=track_id)
track.delete()
messages.add_message(request, messages.INFO,
ugettext('"%s" has been deleted.') % track.title)
return HttpResponseRedirect(request.POST.get('came_from', '/'))
class JavaScriptView(TemplateView):
def render_to_response(self, context, **response_kwargs):
response_kwargs['content_type'] = "application/javascript"
return super(JavaScriptView, self).render_to_response(
context, **response_kwargs)
player_script = JavaScriptView.as_view(template_name="audiotracks/player.js")
def m3u(request, username=None):
tracks = get_track_model().objects
if username:
tracks = tracks.filter(user__username=username)
tracks = tracks.order_by('-created_at').all()
response = HttpResponse(content_type="audio/x-mpequrl")
site = RequestSite(request)
filename = "playlist-%s.m3u" % (site.name if username is None
else username)
response['Content-Disposition'] = 'attachment; filename=%s' % filename
for track in tracks:
url = 'http://%s/%s' % (site.domain, track.audio_file.url.strip("/"))
response.write(url + "\n")
return response
|
PypiClean
|