// Source: /DynamicForms-0.74.8-py3-none-any.whl/dynamicforms_legacy/static/bootstrap-3.4.1-dist/js/bootstrap.js
if (typeof jQuery === 'undefined') {
throw new Error('Bootstrap\'s JavaScript requires jQuery')
}
+function ($) {
'use strict';
var version = $.fn.jquery.split(' ')[0].split('.')
if ((version[0] < 2 && version[1] < 9) || (version[0] == 1 && version[1] == 9 && version[2] < 1) || (version[0] > 3)) {
throw new Error('Bootstrap\'s JavaScript requires jQuery version 1.9.1 or higher, but lower than version 4')
}
}(jQuery);
/* ========================================================================
* Bootstrap: transition.js v3.4.1
* https://getbootstrap.com/docs/3.4/javascript/#transitions
* ========================================================================
* Copyright 2011-2019 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// CSS TRANSITION SUPPORT (Shoutout: https://modernizr.com/)
// ============================================================
function transitionEnd() {
var el = document.createElement('bootstrap')
var transEndEventNames = {
WebkitTransition : 'webkitTransitionEnd',
MozTransition : 'transitionend',
OTransition : 'oTransitionEnd otransitionend',
transition : 'transitionend'
}
for (var name in transEndEventNames) {
if (el.style[name] !== undefined) {
return { end: transEndEventNames[name] }
}
}
return false // explicit for ie8 ( ._.)
}
// https://blog.alexmaccaw.com/css-transitions
$.fn.emulateTransitionEnd = function (duration) {
var called = false
var $el = this
$(this).one('bsTransitionEnd', function () { called = true })
var callback = function () { if (!called) $($el).trigger($.support.transition.end) }
setTimeout(callback, duration)
return this
}
$(function () {
$.support.transition = transitionEnd()
if (!$.support.transition) return
$.event.special.bsTransitionEnd = {
bindType: $.support.transition.end,
delegateType: $.support.transition.end,
handle: function (e) {
if ($(e.target).is(this)) return e.handleObj.handler.apply(this, arguments)
}
}
})
}(jQuery);
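/*
 * Usage sketch (not part of the original Bootstrap 3.4.1 source; shown only to
 * illustrate the helpers defined above). $.support.transition is either false or
 * an object whose `end` property names the browser's transitionend event:
 *
 *   if ($.support.transition) {
 *     $('#panel')
 *       .one('bsTransitionEnd', function () { console.log('transition finished') })
 *       .emulateTransitionEnd(350)   // fallback timer in case the event never fires
 *   }
 */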
/* ========================================================================
* Bootstrap: alert.js v3.4.1
* https://getbootstrap.com/docs/3.4/javascript/#alerts
* ========================================================================
* Copyright 2011-2019 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// ALERT CLASS DEFINITION
// ======================
var dismiss = '[data-dismiss="alert"]'
var Alert = function (el) {
$(el).on('click', dismiss, this.close)
}
Alert.VERSION = '3.4.1'
Alert.TRANSITION_DURATION = 150
Alert.prototype.close = function (e) {
var $this = $(this)
var selector = $this.attr('data-target')
if (!selector) {
selector = $this.attr('href')
selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7
}
selector = selector === '#' ? [] : selector
var $parent = $(document).find(selector)
if (e) e.preventDefault()
if (!$parent.length) {
$parent = $this.closest('.alert')
}
$parent.trigger(e = $.Event('close.bs.alert'))
if (e.isDefaultPrevented()) return
$parent.removeClass('in')
function removeElement() {
// detach from parent, fire event then clean up data
$parent.detach().trigger('closed.bs.alert').remove()
}
$.support.transition && $parent.hasClass('fade') ?
$parent
.one('bsTransitionEnd', removeElement)
.emulateTransitionEnd(Alert.TRANSITION_DURATION) :
removeElement()
}
// ALERT PLUGIN DEFINITION
// =======================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.alert')
if (!data) $this.data('bs.alert', (data = new Alert(this)))
if (typeof option == 'string') data[option].call($this)
})
}
var old = $.fn.alert
$.fn.alert = Plugin
$.fn.alert.Constructor = Alert
// ALERT NO CONFLICT
// =================
$.fn.alert.noConflict = function () {
$.fn.alert = old
return this
}
// ALERT DATA-API
// ==============
$(document).on('click.bs.alert.data-api', dismiss, Alert.prototype.close)
}(jQuery);
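/*
 * Usage sketch (not part of the original Bootstrap 3.4.1 source; the #myAlert id is
 * hypothetical). Programmatic API of the Alert plugin defined above:
 *
 *   $('#myAlert').alert()            // wire up the [data-dismiss="alert"] handler
 *   $('#myAlert').alert('close')     // close the alert programmatically
 *   $('#myAlert').on('closed.bs.alert', function () {
 *     // fires once the alert element has been removed from the DOM
 *   })
 *
 * Declaratively, a button with data-dismiss="alert" inside an .alert element closes
 * it through the data-API binding registered above.
 */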
/* ========================================================================
* Bootstrap: button.js v3.4.1
* https://getbootstrap.com/docs/3.4/javascript/#buttons
* ========================================================================
* Copyright 2011-2019 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// BUTTON PUBLIC CLASS DEFINITION
// ==============================
var Button = function (element, options) {
this.$element = $(element)
this.options = $.extend({}, Button.DEFAULTS, options)
this.isLoading = false
}
Button.VERSION = '3.4.1'
Button.DEFAULTS = {
loadingText: 'loading...'
}
Button.prototype.setState = function (state) {
var d = 'disabled'
var $el = this.$element
var val = $el.is('input') ? 'val' : 'html'
var data = $el.data()
state += 'Text'
if (data.resetText == null) $el.data('resetText', $el[val]())
// push to event loop to allow forms to submit
setTimeout($.proxy(function () {
$el[val](data[state] == null ? this.options[state] : data[state])
if (state == 'loadingText') {
this.isLoading = true
$el.addClass(d).attr(d, d).prop(d, true)
} else if (this.isLoading) {
this.isLoading = false
$el.removeClass(d).removeAttr(d).prop(d, false)
}
}, this), 0)
}
Button.prototype.toggle = function () {
var changed = true
var $parent = this.$element.closest('[data-toggle="buttons"]')
if ($parent.length) {
var $input = this.$element.find('input')
if ($input.prop('type') == 'radio') {
if ($input.prop('checked')) changed = false
$parent.find('.active').removeClass('active')
this.$element.addClass('active')
} else if ($input.prop('type') == 'checkbox') {
if (($input.prop('checked')) !== this.$element.hasClass('active')) changed = false
this.$element.toggleClass('active')
}
$input.prop('checked', this.$element.hasClass('active'))
if (changed) $input.trigger('change')
} else {
this.$element.attr('aria-pressed', !this.$element.hasClass('active'))
this.$element.toggleClass('active')
}
}
// BUTTON PLUGIN DEFINITION
// ========================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.button')
var options = typeof option == 'object' && option
if (!data) $this.data('bs.button', (data = new Button(this, options)))
if (option == 'toggle') data.toggle()
else if (option) data.setState(option)
})
}
var old = $.fn.button
$.fn.button = Plugin
$.fn.button.Constructor = Button
// BUTTON NO CONFLICT
// ==================
$.fn.button.noConflict = function () {
$.fn.button = old
return this
}
// BUTTON DATA-API
// ===============
$(document)
.on('click.bs.button.data-api', '[data-toggle^="button"]', function (e) {
var $btn = $(e.target).closest('.btn')
Plugin.call($btn, 'toggle')
if (!($(e.target).is('input[type="radio"], input[type="checkbox"]'))) {
// Prevent double click on radios, and double selection (which would cancel the toggle) on checkboxes
e.preventDefault()
// The target component still receives the focus
if ($btn.is('input,button')) $btn.trigger('focus')
else $btn.find('input:visible,button:visible').first().trigger('focus')
}
})
.on('focus.bs.button.data-api blur.bs.button.data-api', '[data-toggle^="button"]', function (e) {
$(e.target).closest('.btn').toggleClass('focus', /^focus(in)?$/.test(e.type))
})
}(jQuery);
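/*
 * Usage sketch (not part of the original Bootstrap 3.4.1 source; element ids are
 * hypothetical). Programmatic API of the Button plugin defined above:
 *
 *   // Swap the button text to its data-loading-text value and disable it,
 *   // then restore it when the work is done:
 *   $('#submitBtn').button('loading')
 *   $('#submitBtn').button('reset')
 *
 *   // Toggle the active state of a single button or of a button inside a
 *   // [data-toggle="buttons"] group:
 *   $('#optionBtn').button('toggle')
 */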
/* ========================================================================
* Bootstrap: carousel.js v3.4.1
* https://getbootstrap.com/docs/3.4/javascript/#carousel
* ========================================================================
* Copyright 2011-2019 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// CAROUSEL CLASS DEFINITION
// =========================
var Carousel = function (element, options) {
this.$element = $(element)
this.$indicators = this.$element.find('.carousel-indicators')
this.options = options
this.paused = null
this.sliding = null
this.interval = null
this.$active = null
this.$items = null
this.options.keyboard && this.$element.on('keydown.bs.carousel', $.proxy(this.keydown, this))
this.options.pause == 'hover' && !('ontouchstart' in document.documentElement) && this.$element
.on('mouseenter.bs.carousel', $.proxy(this.pause, this))
.on('mouseleave.bs.carousel', $.proxy(this.cycle, this))
}
Carousel.VERSION = '3.4.1'
Carousel.TRANSITION_DURATION = 600
Carousel.DEFAULTS = {
interval: 5000,
pause: 'hover',
wrap: true,
keyboard: true
}
Carousel.prototype.keydown = function (e) {
if (/input|textarea/i.test(e.target.tagName)) return
switch (e.which) {
case 37: this.prev(); break
case 39: this.next(); break
default: return
}
e.preventDefault()
}
Carousel.prototype.cycle = function (e) {
e || (this.paused = false)
this.interval && clearInterval(this.interval)
this.options.interval
&& !this.paused
&& (this.interval = setInterval($.proxy(this.next, this), this.options.interval))
return this
}
Carousel.prototype.getItemIndex = function (item) {
this.$items = item.parent().children('.item')
return this.$items.index(item || this.$active)
}
Carousel.prototype.getItemForDirection = function (direction, active) {
var activeIndex = this.getItemIndex(active)
var willWrap = (direction == 'prev' && activeIndex === 0)
|| (direction == 'next' && activeIndex == (this.$items.length - 1))
if (willWrap && !this.options.wrap) return active
var delta = direction == 'prev' ? -1 : 1
var itemIndex = (activeIndex + delta) % this.$items.length
return this.$items.eq(itemIndex)
}
Carousel.prototype.to = function (pos) {
var that = this
var activeIndex = this.getItemIndex(this.$active = this.$element.find('.item.active'))
if (pos > (this.$items.length - 1) || pos < 0) return
if (this.sliding) return this.$element.one('slid.bs.carousel', function () { that.to(pos) }) // yes, "slid"
if (activeIndex == pos) return this.pause().cycle()
return this.slide(pos > activeIndex ? 'next' : 'prev', this.$items.eq(pos))
}
Carousel.prototype.pause = function (e) {
e || (this.paused = true)
if (this.$element.find('.next, .prev').length && $.support.transition) {
this.$element.trigger($.support.transition.end)
this.cycle(true)
}
this.interval = clearInterval(this.interval)
return this
}
Carousel.prototype.next = function () {
if (this.sliding) return
return this.slide('next')
}
Carousel.prototype.prev = function () {
if (this.sliding) return
return this.slide('prev')
}
Carousel.prototype.slide = function (type, next) {
var $active = this.$element.find('.item.active')
var $next = next || this.getItemForDirection(type, $active)
var isCycling = this.interval
var direction = type == 'next' ? 'left' : 'right'
var that = this
if ($next.hasClass('active')) return (this.sliding = false)
var relatedTarget = $next[0]
var slideEvent = $.Event('slide.bs.carousel', {
relatedTarget: relatedTarget,
direction: direction
})
this.$element.trigger(slideEvent)
if (slideEvent.isDefaultPrevented()) return
this.sliding = true
isCycling && this.pause()
if (this.$indicators.length) {
this.$indicators.find('.active').removeClass('active')
var $nextIndicator = $(this.$indicators.children()[this.getItemIndex($next)])
$nextIndicator && $nextIndicator.addClass('active')
}
var slidEvent = $.Event('slid.bs.carousel', { relatedTarget: relatedTarget, direction: direction }) // yes, "slid"
if ($.support.transition && this.$element.hasClass('slide')) {
$next.addClass(type)
if (typeof $next === 'object' && $next.length) {
$next[0].offsetWidth // force reflow
}
$active.addClass(direction)
$next.addClass(direction)
$active
.one('bsTransitionEnd', function () {
$next.removeClass([type, direction].join(' ')).addClass('active')
$active.removeClass(['active', direction].join(' '))
that.sliding = false
setTimeout(function () {
that.$element.trigger(slidEvent)
}, 0)
})
.emulateTransitionEnd(Carousel.TRANSITION_DURATION)
} else {
$active.removeClass('active')
$next.addClass('active')
this.sliding = false
this.$element.trigger(slidEvent)
}
isCycling && this.cycle()
return this
}
// CAROUSEL PLUGIN DEFINITION
// ==========================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.carousel')
var options = $.extend({}, Carousel.DEFAULTS, $this.data(), typeof option == 'object' && option)
var action = typeof option == 'string' ? option : options.slide
if (!data) $this.data('bs.carousel', (data = new Carousel(this, options)))
if (typeof option == 'number') data.to(option)
else if (action) data[action]()
else if (options.interval) data.pause().cycle()
})
}
var old = $.fn.carousel
$.fn.carousel = Plugin
$.fn.carousel.Constructor = Carousel
// CAROUSEL NO CONFLICT
// ====================
$.fn.carousel.noConflict = function () {
$.fn.carousel = old
return this
}
// CAROUSEL DATA-API
// =================
var clickHandler = function (e) {
var $this = $(this)
var href = $this.attr('href')
if (href) {
href = href.replace(/.*(?=#[^\s]+$)/, '') // strip for ie7
}
var target = $this.attr('data-target') || href
var $target = $(document).find(target)
if (!$target.hasClass('carousel')) return
var options = $.extend({}, $target.data(), $this.data())
var slideIndex = $this.attr('data-slide-to')
if (slideIndex) options.interval = false
Plugin.call($target, options)
if (slideIndex) {
$target.data('bs.carousel').to(slideIndex)
}
e.preventDefault()
}
$(document)
.on('click.bs.carousel.data-api', '[data-slide]', clickHandler)
.on('click.bs.carousel.data-api', '[data-slide-to]', clickHandler)
$(window).on('load', function () {
$('[data-ride="carousel"]').each(function () {
var $carousel = $(this)
Plugin.call($carousel, $carousel.data())
})
})
}(jQuery);
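/*
 * Usage sketch (not part of the original Bootstrap 3.4.1 source; #myCarousel is
 * hypothetical). Programmatic API of the Carousel plugin defined above:
 *
 *   $('#myCarousel').carousel({ interval: 3000, pause: 'hover', wrap: true })
 *   $('#myCarousel').carousel('pause')   // stop cycling
 *   $('#myCarousel').carousel(2)         // go to the third slide
 *   $('#myCarousel').carousel('next')    // advance one slide
 *   $('#myCarousel').on('slid.bs.carousel', function (e) {
 *     // e.relatedTarget is the slide that just became active
 *   })
 *
 * Declaratively, data-ride="carousel" auto-starts cycling via the window load handler
 * above, and data-slide / data-slide-to controls are wired through clickHandler.
 */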
/* ========================================================================
* Bootstrap: collapse.js v3.4.1
* https://getbootstrap.com/docs/3.4/javascript/#collapse
* ========================================================================
* Copyright 2011-2019 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
/* jshint latedef: false */
+function ($) {
'use strict';
// COLLAPSE PUBLIC CLASS DEFINITION
// ================================
var Collapse = function (element, options) {
this.$element = $(element)
this.options = $.extend({}, Collapse.DEFAULTS, options)
this.$trigger = $('[data-toggle="collapse"][href="#' + element.id + '"],' +
'[data-toggle="collapse"][data-target="#' + element.id + '"]')
this.transitioning = null
if (this.options.parent) {
this.$parent = this.getParent()
} else {
this.addAriaAndCollapsedClass(this.$element, this.$trigger)
}
if (this.options.toggle) this.toggle()
}
Collapse.VERSION = '3.4.1'
Collapse.TRANSITION_DURATION = 350
Collapse.DEFAULTS = {
toggle: true
}
Collapse.prototype.dimension = function () {
var hasWidth = this.$element.hasClass('width')
return hasWidth ? 'width' : 'height'
}
Collapse.prototype.show = function () {
if (this.transitioning || this.$element.hasClass('in')) return
var activesData
var actives = this.$parent && this.$parent.children('.panel').children('.in, .collapsing')
if (actives && actives.length) {
activesData = actives.data('bs.collapse')
if (activesData && activesData.transitioning) return
}
var startEvent = $.Event('show.bs.collapse')
this.$element.trigger(startEvent)
if (startEvent.isDefaultPrevented()) return
if (actives && actives.length) {
Plugin.call(actives, 'hide')
activesData || actives.data('bs.collapse', null)
}
var dimension = this.dimension()
this.$element
.removeClass('collapse')
.addClass('collapsing')[dimension](0)
.attr('aria-expanded', true)
this.$trigger
.removeClass('collapsed')
.attr('aria-expanded', true)
this.transitioning = 1
var complete = function () {
this.$element
.removeClass('collapsing')
.addClass('collapse in')[dimension]('')
this.transitioning = 0
this.$element
.trigger('shown.bs.collapse')
}
if (!$.support.transition) return complete.call(this)
var scrollSize = $.camelCase(['scroll', dimension].join('-'))
this.$element
.one('bsTransitionEnd', $.proxy(complete, this))
.emulateTransitionEnd(Collapse.TRANSITION_DURATION)[dimension](this.$element[0][scrollSize])
}
Collapse.prototype.hide = function () {
if (this.transitioning || !this.$element.hasClass('in')) return
var startEvent = $.Event('hide.bs.collapse')
this.$element.trigger(startEvent)
if (startEvent.isDefaultPrevented()) return
var dimension = this.dimension()
this.$element[dimension](this.$element[dimension]())[0].offsetHeight
this.$element
.addClass('collapsing')
.removeClass('collapse in')
.attr('aria-expanded', false)
this.$trigger
.addClass('collapsed')
.attr('aria-expanded', false)
this.transitioning = 1
var complete = function () {
this.transitioning = 0
this.$element
.removeClass('collapsing')
.addClass('collapse')
.trigger('hidden.bs.collapse')
}
if (!$.support.transition) return complete.call(this)
this.$element
[dimension](0)
.one('bsTransitionEnd', $.proxy(complete, this))
.emulateTransitionEnd(Collapse.TRANSITION_DURATION)
}
Collapse.prototype.toggle = function () {
this[this.$element.hasClass('in') ? 'hide' : 'show']()
}
Collapse.prototype.getParent = function () {
return $(document).find(this.options.parent)
.find('[data-toggle="collapse"][data-parent="' + this.options.parent + '"]')
.each($.proxy(function (i, element) {
var $element = $(element)
this.addAriaAndCollapsedClass(getTargetFromTrigger($element), $element)
}, this))
.end()
}
Collapse.prototype.addAriaAndCollapsedClass = function ($element, $trigger) {
var isOpen = $element.hasClass('in')
$element.attr('aria-expanded', isOpen)
$trigger
.toggleClass('collapsed', !isOpen)
.attr('aria-expanded', isOpen)
}
function getTargetFromTrigger($trigger) {
var href
var target = $trigger.attr('data-target')
|| (href = $trigger.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '') // strip for ie7
return $(document).find(target)
}
// COLLAPSE PLUGIN DEFINITION
// ==========================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.collapse')
var options = $.extend({}, Collapse.DEFAULTS, $this.data(), typeof option == 'object' && option)
if (!data && options.toggle && /show|hide/.test(option)) options.toggle = false
if (!data) $this.data('bs.collapse', (data = new Collapse(this, options)))
if (typeof option == 'string') data[option]()
})
}
var old = $.fn.collapse
$.fn.collapse = Plugin
$.fn.collapse.Constructor = Collapse
// COLLAPSE NO CONFLICT
// ====================
$.fn.collapse.noConflict = function () {
$.fn.collapse = old
return this
}
// COLLAPSE DATA-API
// =================
$(document).on('click.bs.collapse.data-api', '[data-toggle="collapse"]', function (e) {
var $this = $(this)
if (!$this.attr('data-target')) e.preventDefault()
var $target = getTargetFromTrigger($this)
var data = $target.data('bs.collapse')
var option = data ? 'toggle' : $this.data()
Plugin.call($target, option)
})
}(jQuery);
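/*
 * Usage sketch (not part of the original Bootstrap 3.4.1 source; #myCollapsible is
 * hypothetical). Programmatic API of the Collapse plugin defined above:
 *
 *   $('#myCollapsible').collapse({ toggle: false })   // initialize without toggling
 *   $('#myCollapsible').collapse('show')
 *   $('#myCollapsible').collapse('hide')
 *   $('#myCollapsible').on('shown.bs.collapse', function () {
 *     // fires after the expand transition completes
 *   })
 *
 * Declaratively, [data-toggle="collapse"] triggers paired with data-target (or href)
 * are handled by the data-API binding above; data-parent gives accordion behavior.
 */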
/* ========================================================================
* Bootstrap: dropdown.js v3.4.1
* https://getbootstrap.com/docs/3.4/javascript/#dropdowns
* ========================================================================
* Copyright 2011-2019 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// DROPDOWN CLASS DEFINITION
// =========================
var backdrop = '.dropdown-backdrop'
var toggle = '[data-toggle="dropdown"]'
var Dropdown = function (element) {
$(element).on('click.bs.dropdown', this.toggle)
}
Dropdown.VERSION = '3.4.1'
function getParent($this) {
var selector = $this.attr('data-target')
if (!selector) {
selector = $this.attr('href')
selector = selector && /#[A-Za-z]/.test(selector) && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7
}
var $parent = selector !== '#' ? $(document).find(selector) : null
return $parent && $parent.length ? $parent : $this.parent()
}
function clearMenus(e) {
if (e && e.which === 3) return
$(backdrop).remove()
$(toggle).each(function () {
var $this = $(this)
var $parent = getParent($this)
var relatedTarget = { relatedTarget: this }
if (!$parent.hasClass('open')) return
if (e && e.type == 'click' && /input|textarea/i.test(e.target.tagName) && $.contains($parent[0], e.target)) return
$parent.trigger(e = $.Event('hide.bs.dropdown', relatedTarget))
if (e.isDefaultPrevented()) return
$this.attr('aria-expanded', 'false')
$parent.removeClass('open').trigger($.Event('hidden.bs.dropdown', relatedTarget))
})
}
Dropdown.prototype.toggle = function (e) {
var $this = $(this)
if ($this.is('.disabled, :disabled')) return
var $parent = getParent($this)
var isActive = $parent.hasClass('open')
clearMenus()
if (!isActive) {
if ('ontouchstart' in document.documentElement && !$parent.closest('.navbar-nav').length) {
// if mobile we use a backdrop because click events don't delegate
$(document.createElement('div'))
.addClass('dropdown-backdrop')
.insertAfter($(this))
.on('click', clearMenus)
}
var relatedTarget = { relatedTarget: this }
$parent.trigger(e = $.Event('show.bs.dropdown', relatedTarget))
if (e.isDefaultPrevented()) return
$this
.trigger('focus')
.attr('aria-expanded', 'true')
$parent
.toggleClass('open')
.trigger($.Event('shown.bs.dropdown', relatedTarget))
}
return false
}
Dropdown.prototype.keydown = function (e) {
if (!/(38|40|27|32)/.test(e.which) || /input|textarea/i.test(e.target.tagName)) return
var $this = $(this)
e.preventDefault()
e.stopPropagation()
if ($this.is('.disabled, :disabled')) return
var $parent = getParent($this)
var isActive = $parent.hasClass('open')
if (!isActive && e.which != 27 || isActive && e.which == 27) {
if (e.which == 27) $parent.find(toggle).trigger('focus')
return $this.trigger('click')
}
var desc = ' li:not(.disabled):visible a'
var $items = $parent.find('.dropdown-menu' + desc)
if (!$items.length) return
var index = $items.index(e.target)
if (e.which == 38 && index > 0) index-- // up
if (e.which == 40 && index < $items.length - 1) index++ // down
if (!~index) index = 0
$items.eq(index).trigger('focus')
}
// DROPDOWN PLUGIN DEFINITION
// ==========================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.dropdown')
if (!data) $this.data('bs.dropdown', (data = new Dropdown(this)))
if (typeof option == 'string') data[option].call($this)
})
}
var old = $.fn.dropdown
$.fn.dropdown = Plugin
$.fn.dropdown.Constructor = Dropdown
// DROPDOWN NO CONFLICT
// ====================
$.fn.dropdown.noConflict = function () {
$.fn.dropdown = old
return this
}
// APPLY TO STANDARD DROPDOWN ELEMENTS
// ===================================
$(document)
.on('click.bs.dropdown.data-api', clearMenus)
.on('click.bs.dropdown.data-api', '.dropdown form', function (e) { e.stopPropagation() })
.on('click.bs.dropdown.data-api', toggle, Dropdown.prototype.toggle)
.on('keydown.bs.dropdown.data-api', toggle, Dropdown.prototype.keydown)
.on('keydown.bs.dropdown.data-api', '.dropdown-menu', Dropdown.prototype.keydown)
}(jQuery);
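/*
 * Usage sketch (not part of the original Bootstrap 3.4.1 source; the selectors are
 * hypothetical). Programmatic API of the Dropdown plugin defined above:
 *
 *   $('.dropdown-toggle').dropdown()          // bind toggling on click
 *   $('.dropdown-toggle').dropdown('toggle')  // open/close programmatically
 *   $('#myDropdown').on('shown.bs.dropdown', function (e) {
 *     // e.relatedTarget is the toggling anchor/button
 *   })
 *
 * Declaratively, [data-toggle="dropdown"] elements are handled by the document-level
 * click and keydown bindings above; clicking elsewhere closes open menus via clearMenus.
 */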
/* ========================================================================
* Bootstrap: modal.js v3.4.1
* https://getbootstrap.com/docs/3.4/javascript/#modals
* ========================================================================
* Copyright 2011-2019 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// MODAL CLASS DEFINITION
// ======================
var Modal = function (element, options) {
this.options = options
this.$body = $(document.body)
this.$element = $(element)
this.$dialog = this.$element.find('.modal-dialog')
this.$backdrop = null
this.isShown = null
this.originalBodyPad = null
this.scrollbarWidth = 0
this.ignoreBackdropClick = false
this.fixedContent = '.navbar-fixed-top, .navbar-fixed-bottom'
if (this.options.remote) {
this.$element
.find('.modal-content')
.load(this.options.remote, $.proxy(function () {
this.$element.trigger('loaded.bs.modal')
}, this))
}
}
Modal.VERSION = '3.4.1'
Modal.TRANSITION_DURATION = 300
Modal.BACKDROP_TRANSITION_DURATION = 150
Modal.DEFAULTS = {
backdrop: true,
keyboard: true,
show: true
}
Modal.prototype.toggle = function (_relatedTarget) {
return this.isShown ? this.hide() : this.show(_relatedTarget)
}
Modal.prototype.show = function (_relatedTarget) {
var that = this
var e = $.Event('show.bs.modal', { relatedTarget: _relatedTarget })
this.$element.trigger(e)
if (this.isShown || e.isDefaultPrevented()) return
this.isShown = true
this.checkScrollbar()
this.setScrollbar()
this.$body.addClass('modal-open')
this.escape()
this.resize()
this.$element.on('click.dismiss.bs.modal', '[data-dismiss="modal"]', $.proxy(this.hide, this))
this.$dialog.on('mousedown.dismiss.bs.modal', function () {
that.$element.one('mouseup.dismiss.bs.modal', function (e) {
if ($(e.target).is(that.$element)) that.ignoreBackdropClick = true
})
})
this.backdrop(function () {
var transition = $.support.transition && that.$element.hasClass('fade')
if (!that.$element.parent().length) {
that.$element.appendTo(that.$body) // don't move the modal's DOM position
}
that.$element
.show()
.scrollTop(0)
that.adjustDialog()
if (transition) {
that.$element[0].offsetWidth // force reflow
}
that.$element.addClass('in')
that.enforceFocus()
var e = $.Event('shown.bs.modal', { relatedTarget: _relatedTarget })
transition ?
that.$dialog // wait for modal to slide in
.one('bsTransitionEnd', function () {
that.$element.trigger('focus').trigger(e)
})
.emulateTransitionEnd(Modal.TRANSITION_DURATION) :
that.$element.trigger('focus').trigger(e)
})
}
Modal.prototype.hide = function (e) {
if (e) e.preventDefault()
e = $.Event('hide.bs.modal')
this.$element.trigger(e)
if (!this.isShown || e.isDefaultPrevented()) return
this.isShown = false
this.escape()
this.resize()
$(document).off('focusin.bs.modal')
this.$element
.removeClass('in')
.off('click.dismiss.bs.modal')
.off('mouseup.dismiss.bs.modal')
this.$dialog.off('mousedown.dismiss.bs.modal')
$.support.transition && this.$element.hasClass('fade') ?
this.$element
.one('bsTransitionEnd', $.proxy(this.hideModal, this))
.emulateTransitionEnd(Modal.TRANSITION_DURATION) :
this.hideModal()
}
Modal.prototype.enforceFocus = function () {
$(document)
.off('focusin.bs.modal') // guard against infinite focus loop
.on('focusin.bs.modal', $.proxy(function (e) {
if (document !== e.target &&
this.$element[0] !== e.target &&
!this.$element.has(e.target).length) {
this.$element.trigger('focus')
}
}, this))
}
Modal.prototype.escape = function () {
if (this.isShown && this.options.keyboard) {
this.$element.on('keydown.dismiss.bs.modal', $.proxy(function (e) {
e.which == 27 && this.hide()
}, this))
} else if (!this.isShown) {
this.$element.off('keydown.dismiss.bs.modal')
}
}
Modal.prototype.resize = function () {
if (this.isShown) {
$(window).on('resize.bs.modal', $.proxy(this.handleUpdate, this))
} else {
$(window).off('resize.bs.modal')
}
}
Modal.prototype.hideModal = function () {
var that = this
this.$element.hide()
this.backdrop(function () {
that.$body.removeClass('modal-open')
that.resetAdjustments()
that.resetScrollbar()
that.$element.trigger('hidden.bs.modal')
})
}
Modal.prototype.removeBackdrop = function () {
this.$backdrop && this.$backdrop.remove()
this.$backdrop = null
}
Modal.prototype.backdrop = function (callback) {
var that = this
var animate = this.$element.hasClass('fade') ? 'fade' : ''
if (this.isShown && this.options.backdrop) {
var doAnimate = $.support.transition && animate
this.$backdrop = $(document.createElement('div'))
.addClass('modal-backdrop ' + animate)
.appendTo(this.$body)
this.$element.on('click.dismiss.bs.modal', $.proxy(function (e) {
if (this.ignoreBackdropClick) {
this.ignoreBackdropClick = false
return
}
if (e.target !== e.currentTarget) return
this.options.backdrop == 'static'
? this.$element[0].focus()
: this.hide()
}, this))
if (doAnimate) this.$backdrop[0].offsetWidth // force reflow
this.$backdrop.addClass('in')
if (!callback) return
doAnimate ?
this.$backdrop
.one('bsTransitionEnd', callback)
.emulateTransitionEnd(Modal.BACKDROP_TRANSITION_DURATION) :
callback()
} else if (!this.isShown && this.$backdrop) {
this.$backdrop.removeClass('in')
var callbackRemove = function () {
that.removeBackdrop()
callback && callback()
}
$.support.transition && this.$element.hasClass('fade') ?
this.$backdrop
.one('bsTransitionEnd', callbackRemove)
.emulateTransitionEnd(Modal.BACKDROP_TRANSITION_DURATION) :
callbackRemove()
} else if (callback) {
callback()
}
}
// the following methods are used to handle overflowing modals
Modal.prototype.handleUpdate = function () {
this.adjustDialog()
}
Modal.prototype.adjustDialog = function () {
var modalIsOverflowing = this.$element[0].scrollHeight > document.documentElement.clientHeight
this.$element.css({
paddingLeft: !this.bodyIsOverflowing && modalIsOverflowing ? this.scrollbarWidth : '',
paddingRight: this.bodyIsOverflowing && !modalIsOverflowing ? this.scrollbarWidth : ''
})
}
Modal.prototype.resetAdjustments = function () {
this.$element.css({
paddingLeft: '',
paddingRight: ''
})
}
Modal.prototype.checkScrollbar = function () {
var fullWindowWidth = window.innerWidth
if (!fullWindowWidth) { // workaround for missing window.innerWidth in IE8
var documentElementRect = document.documentElement.getBoundingClientRect()
fullWindowWidth = documentElementRect.right - Math.abs(documentElementRect.left)
}
this.bodyIsOverflowing = document.body.clientWidth < fullWindowWidth
this.scrollbarWidth = this.measureScrollbar()
}
Modal.prototype.setScrollbar = function () {
var bodyPad = parseInt((this.$body.css('padding-right') || 0), 10)
this.originalBodyPad = document.body.style.paddingRight || ''
var scrollbarWidth = this.scrollbarWidth
if (this.bodyIsOverflowing) {
this.$body.css('padding-right', bodyPad + scrollbarWidth)
$(this.fixedContent).each(function (index, element) {
var actualPadding = element.style.paddingRight
var calculatedPadding = $(element).css('padding-right')
$(element)
.data('padding-right', actualPadding)
.css('padding-right', parseFloat(calculatedPadding) + scrollbarWidth + 'px')
})
}
}
Modal.prototype.resetScrollbar = function () {
this.$body.css('padding-right', this.originalBodyPad)
$(this.fixedContent).each(function (index, element) {
var padding = $(element).data('padding-right')
$(element).removeData('padding-right')
element.style.paddingRight = padding ? padding : ''
})
}
Modal.prototype.measureScrollbar = function () { // thx walsh
var scrollDiv = document.createElement('div')
scrollDiv.className = 'modal-scrollbar-measure'
this.$body.append(scrollDiv)
var scrollbarWidth = scrollDiv.offsetWidth - scrollDiv.clientWidth
this.$body[0].removeChild(scrollDiv)
return scrollbarWidth
}
// MODAL PLUGIN DEFINITION
// =======================
function Plugin(option, _relatedTarget) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.modal')
var options = $.extend({}, Modal.DEFAULTS, $this.data(), typeof option == 'object' && option)
if (!data) $this.data('bs.modal', (data = new Modal(this, options)))
if (typeof option == 'string') data[option](_relatedTarget)
else if (options.show) data.show(_relatedTarget)
})
}
var old = $.fn.modal
$.fn.modal = Plugin
$.fn.modal.Constructor = Modal
// MODAL NO CONFLICT
// =================
$.fn.modal.noConflict = function () {
$.fn.modal = old
return this
}
// MODAL DATA-API
// ==============
$(document).on('click.bs.modal.data-api', '[data-toggle="modal"]', function (e) {
var $this = $(this)
var href = $this.attr('href')
var target = $this.attr('data-target') ||
(href && href.replace(/.*(?=#[^\s]+$)/, '')) // strip for ie7
var $target = $(document).find(target)
var option = $target.data('bs.modal') ? 'toggle' : $.extend({ remote: !/#/.test(href) && href }, $target.data(), $this.data())
if ($this.is('a')) e.preventDefault()
$target.one('show.bs.modal', function (showEvent) {
if (showEvent.isDefaultPrevented()) return // only register focus restorer if modal will actually get shown
$target.one('hidden.bs.modal', function () {
$this.is(':visible') && $this.trigger('focus')
})
})
Plugin.call($target, option, this)
})
}(jQuery);
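/*
 * Usage sketch (not part of the original Bootstrap 3.4.1 source; #myModal is
 * hypothetical). Programmatic API of the Modal plugin defined above:
 *
 *   $('#myModal').modal({ backdrop: 'static', keyboard: false, show: true })
 *   $('#myModal').modal('show')
 *   $('#myModal').modal('hide')
 *   $('#myModal').modal('handleUpdate')   // readjust after the modal's height changes
 *   $('#myModal').on('hidden.bs.modal', function () {
 *     // fires once the modal has finished hiding
 *   })
 *
 * Declaratively, [data-toggle="modal"] triggers with data-target (or href) open the
 * modal through the data-API binding above.
 */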
/* ========================================================================
* Bootstrap: tooltip.js v3.4.1
* https://getbootstrap.com/docs/3.4/javascript/#tooltip
* Inspired by the original jQuery.tipsy by Jason Frame
* ========================================================================
* Copyright 2011-2019 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
var DISALLOWED_ATTRIBUTES = ['sanitize', 'whiteList', 'sanitizeFn']
var uriAttrs = [
'background',
'cite',
'href',
'itemtype',
'longdesc',
'poster',
'src',
'xlink:href'
]
var ARIA_ATTRIBUTE_PATTERN = /^aria-[\w-]*$/i
var DefaultWhitelist = {
// Global attributes allowed on any supplied element below.
'*': ['class', 'dir', 'id', 'lang', 'role', ARIA_ATTRIBUTE_PATTERN],
a: ['target', 'href', 'title', 'rel'],
area: [],
b: [],
br: [],
col: [],
code: [],
div: [],
em: [],
hr: [],
h1: [],
h2: [],
h3: [],
h4: [],
h5: [],
h6: [],
i: [],
img: ['src', 'alt', 'title', 'width', 'height'],
li: [],
ol: [],
p: [],
pre: [],
s: [],
small: [],
span: [],
sub: [],
sup: [],
strong: [],
u: [],
ul: []
}
/**
* A pattern that recognizes a commonly useful subset of URLs that are safe.
*
* Shoutout to Angular 7 https://github.com/angular/angular/blob/7.2.4/packages/core/src/sanitization/url_sanitizer.ts
*/
var SAFE_URL_PATTERN = /^(?:(?:https?|mailto|ftp|tel|file):|[^&:/?#]*(?:[/?#]|$))/gi
/**
* A pattern that matches safe data URLs. Only matches image, video and audio types.
*
* Shoutout to Angular 7 https://github.com/angular/angular/blob/7.2.4/packages/core/src/sanitization/url_sanitizer.ts
*/
var DATA_URL_PATTERN = /^data:(?:image\/(?:bmp|gif|jpeg|jpg|png|tiff|webp)|video\/(?:mpeg|mp4|ogg|webm)|audio\/(?:mp3|oga|ogg|opus));base64,[a-z0-9+/]+=*$/i
function allowedAttribute(attr, allowedAttributeList) {
var attrName = attr.nodeName.toLowerCase()
if ($.inArray(attrName, allowedAttributeList) !== -1) {
if ($.inArray(attrName, uriAttrs) !== -1) {
return Boolean(attr.nodeValue.match(SAFE_URL_PATTERN) || attr.nodeValue.match(DATA_URL_PATTERN))
}
return true
}
var regExp = $(allowedAttributeList).filter(function (index, value) {
return value instanceof RegExp
})
// Check if a regular expression validates the attribute.
for (var i = 0, l = regExp.length; i < l; i++) {
if (attrName.match(regExp[i])) {
return true
}
}
return false
}
function sanitizeHtml(unsafeHtml, whiteList, sanitizeFn) {
if (unsafeHtml.length === 0) {
return unsafeHtml
}
if (sanitizeFn && typeof sanitizeFn === 'function') {
return sanitizeFn(unsafeHtml)
}
// IE 8 and below don't support createHTMLDocument
if (!document.implementation || !document.implementation.createHTMLDocument) {
return unsafeHtml
}
var createdDocument = document.implementation.createHTMLDocument('sanitization')
createdDocument.body.innerHTML = unsafeHtml
var whitelistKeys = $.map(whiteList, function (el, i) { return i })
var elements = $(createdDocument.body).find('*')
for (var i = 0, len = elements.length; i < len; i++) {
var el = elements[i]
var elName = el.nodeName.toLowerCase()
if ($.inArray(elName, whitelistKeys) === -1) {
el.parentNode.removeChild(el)
continue
}
var attributeList = $.map(el.attributes, function (el) { return el })
var whitelistedAttributes = [].concat(whiteList['*'] || [], whiteList[elName] || [])
for (var j = 0, len2 = attributeList.length; j < len2; j++) {
if (!allowedAttribute(attributeList[j], whitelistedAttributes)) {
el.removeAttribute(attributeList[j].nodeName)
}
}
}
return createdDocument.body.innerHTML
}
// TOOLTIP PUBLIC CLASS DEFINITION
// ===============================
var Tooltip = function (element, options) {
this.type = null
this.options = null
this.enabled = null
this.timeout = null
this.hoverState = null
this.$element = null
this.inState = null
this.init('tooltip', element, options)
}
Tooltip.VERSION = '3.4.1'
Tooltip.TRANSITION_DURATION = 150
Tooltip.DEFAULTS = {
animation: true,
placement: 'top',
selector: false,
template: '<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',
trigger: 'hover focus',
title: '',
delay: 0,
html: false,
container: false,
viewport: {
selector: 'body',
padding: 0
},
sanitize : true,
sanitizeFn : null,
whiteList : DefaultWhitelist
}
Tooltip.prototype.init = function (type, element, options) {
this.enabled = true
this.type = type
this.$element = $(element)
this.options = this.getOptions(options)
this.$viewport = this.options.viewport && $(document).find($.isFunction(this.options.viewport) ? this.options.viewport.call(this, this.$element) : (this.options.viewport.selector || this.options.viewport))
this.inState = { click: false, hover: false, focus: false }
if (this.$element[0] instanceof document.constructor && !this.options.selector) {
throw new Error('`selector` option must be specified when initializing ' + this.type + ' on the window.document object!')
}
var triggers = this.options.trigger.split(' ')
for (var i = triggers.length; i--;) {
var trigger = triggers[i]
if (trigger == 'click') {
this.$element.on('click.' + this.type, this.options.selector, $.proxy(this.toggle, this))
} else if (trigger != 'manual') {
var eventIn = trigger == 'hover' ? 'mouseenter' : 'focusin'
var eventOut = trigger == 'hover' ? 'mouseleave' : 'focusout'
this.$element.on(eventIn + '.' + this.type, this.options.selector, $.proxy(this.enter, this))
this.$element.on(eventOut + '.' + this.type, this.options.selector, $.proxy(this.leave, this))
}
}
this.options.selector ?
(this._options = $.extend({}, this.options, { trigger: 'manual', selector: '' })) :
this.fixTitle()
}
Tooltip.prototype.getDefaults = function () {
return Tooltip.DEFAULTS
}
Tooltip.prototype.getOptions = function (options) {
var dataAttributes = this.$element.data()
for (var dataAttr in dataAttributes) {
if (dataAttributes.hasOwnProperty(dataAttr) && $.inArray(dataAttr, DISALLOWED_ATTRIBUTES) !== -1) {
delete dataAttributes[dataAttr]
}
}
options = $.extend({}, this.getDefaults(), dataAttributes, options)
if (options.delay && typeof options.delay == 'number') {
options.delay = {
show: options.delay,
hide: options.delay
}
}
if (options.sanitize) {
options.template = sanitizeHtml(options.template, options.whiteList, options.sanitizeFn)
}
return options
}
Tooltip.prototype.getDelegateOptions = function () {
var options = {}
var defaults = this.getDefaults()
this._options && $.each(this._options, function (key, value) {
if (defaults[key] != value) options[key] = value
})
return options
}
Tooltip.prototype.enter = function (obj) {
var self = obj instanceof this.constructor ?
obj : $(obj.currentTarget).data('bs.' + this.type)
if (!self) {
self = new this.constructor(obj.currentTarget, this.getDelegateOptions())
$(obj.currentTarget).data('bs.' + this.type, self)
}
if (obj instanceof $.Event) {
self.inState[obj.type == 'focusin' ? 'focus' : 'hover'] = true
}
if (self.tip().hasClass('in') || self.hoverState == 'in') {
self.hoverState = 'in'
return
}
clearTimeout(self.timeout)
self.hoverState = 'in'
if (!self.options.delay || !self.options.delay.show) return self.show()
self.timeout = setTimeout(function () {
if (self.hoverState == 'in') self.show()
}, self.options.delay.show)
}
Tooltip.prototype.isInStateTrue = function () {
for (var key in this.inState) {
if (this.inState[key]) return true
}
return false
}
Tooltip.prototype.leave = function (obj) {
var self = obj instanceof this.constructor ?
obj : $(obj.currentTarget).data('bs.' + this.type)
if (!self) {
self = new this.constructor(obj.currentTarget, this.getDelegateOptions())
$(obj.currentTarget).data('bs.' + this.type, self)
}
if (obj instanceof $.Event) {
self.inState[obj.type == 'focusout' ? 'focus' : 'hover'] = false
}
if (self.isInStateTrue()) return
clearTimeout(self.timeout)
self.hoverState = 'out'
if (!self.options.delay || !self.options.delay.hide) return self.hide()
self.timeout = setTimeout(function () {
if (self.hoverState == 'out') self.hide()
}, self.options.delay.hide)
}
Tooltip.prototype.show = function () {
var e = $.Event('show.bs.' + this.type)
if (this.hasContent() && this.enabled) {
this.$element.trigger(e)
var inDom = $.contains(this.$element[0].ownerDocument.documentElement, this.$element[0])
if (e.isDefaultPrevented() || !inDom) return
var that = this
var $tip = this.tip()
var tipId = this.getUID(this.type)
this.setContent()
$tip.attr('id', tipId)
this.$element.attr('aria-describedby', tipId)
if (this.options.animation) $tip.addClass('fade')
var placement = typeof this.options.placement == 'function' ?
this.options.placement.call(this, $tip[0], this.$element[0]) :
this.options.placement
var autoToken = /\s?auto?\s?/i
var autoPlace = autoToken.test(placement)
if (autoPlace) placement = placement.replace(autoToken, '') || 'top'
$tip
.detach()
.css({ top: 0, left: 0, display: 'block' })
.addClass(placement)
.data('bs.' + this.type, this)
this.options.container ? $tip.appendTo($(document).find(this.options.container)) : $tip.insertAfter(this.$element)
this.$element.trigger('inserted.bs.' + this.type)
var pos = this.getPosition()
var actualWidth = $tip[0].offsetWidth
var actualHeight = $tip[0].offsetHeight
if (autoPlace) {
var orgPlacement = placement
var viewportDim = this.getPosition(this.$viewport)
placement = placement == 'bottom' && pos.bottom + actualHeight > viewportDim.bottom ? 'top' :
placement == 'top' && pos.top - actualHeight < viewportDim.top ? 'bottom' :
placement == 'right' && pos.right + actualWidth > viewportDim.width ? 'left' :
placement == 'left' && pos.left - actualWidth < viewportDim.left ? 'right' :
placement
$tip
.removeClass(orgPlacement)
.addClass(placement)
}
var calculatedOffset = this.getCalculatedOffset(placement, pos, actualWidth, actualHeight)
this.applyPlacement(calculatedOffset, placement)
var complete = function () {
var prevHoverState = that.hoverState
that.$element.trigger('shown.bs.' + that.type)
that.hoverState = null
if (prevHoverState == 'out') that.leave(that)
}
$.support.transition && this.$tip.hasClass('fade') ?
$tip
.one('bsTransitionEnd', complete)
.emulateTransitionEnd(Tooltip.TRANSITION_DURATION) :
complete()
}
}
Tooltip.prototype.applyPlacement = function (offset, placement) {
var $tip = this.tip()
var width = $tip[0].offsetWidth
var height = $tip[0].offsetHeight
// manually read margins because getBoundingClientRect includes difference
var marginTop = parseInt($tip.css('margin-top'), 10)
var marginLeft = parseInt($tip.css('margin-left'), 10)
// we must check for NaN for ie 8/9
if (isNaN(marginTop)) marginTop = 0
if (isNaN(marginLeft)) marginLeft = 0
offset.top += marginTop
offset.left += marginLeft
// $.fn.offset doesn't round pixel values
// so we use setOffset directly with our own function B-0
$.offset.setOffset($tip[0], $.extend({
using: function (props) {
$tip.css({
top: Math.round(props.top),
left: Math.round(props.left)
})
}
}, offset), 0)
$tip.addClass('in')
// check to see if placing tip in new offset caused the tip to resize itself
var actualWidth = $tip[0].offsetWidth
var actualHeight = $tip[0].offsetHeight
if (placement == 'top' && actualHeight != height) {
offset.top = offset.top + height - actualHeight
}
var delta = this.getViewportAdjustedDelta(placement, offset, actualWidth, actualHeight)
if (delta.left) offset.left += delta.left
else offset.top += delta.top
var isVertical = /top|bottom/.test(placement)
var arrowDelta = isVertical ? delta.left * 2 - width + actualWidth : delta.top * 2 - height + actualHeight
var arrowOffsetPosition = isVertical ? 'offsetWidth' : 'offsetHeight'
$tip.offset(offset)
this.replaceArrow(arrowDelta, $tip[0][arrowOffsetPosition], isVertical)
}
Tooltip.prototype.replaceArrow = function (delta, dimension, isVertical) {
this.arrow()
.css(isVertical ? 'left' : 'top', 50 * (1 - delta / dimension) + '%')
.css(isVertical ? 'top' : 'left', '')
}
Tooltip.prototype.setContent = function () {
var $tip = this.tip()
var title = this.getTitle()
if (this.options.html) {
if (this.options.sanitize) {
title = sanitizeHtml(title, this.options.whiteList, this.options.sanitizeFn)
}
$tip.find('.tooltip-inner').html(title)
} else {
$tip.find('.tooltip-inner').text(title)
}
$tip.removeClass('fade in top bottom left right')
}
Tooltip.prototype.hide = function (callback) {
var that = this
var $tip = $(this.$tip)
var e = $.Event('hide.bs.' + this.type)
function complete() {
if (that.hoverState != 'in') $tip.detach()
if (that.$element) { // TODO: Check whether guarding this code with this `if` is really necessary.
that.$element
.removeAttr('aria-describedby')
.trigger('hidden.bs.' + that.type)
}
callback && callback()
}
this.$element.trigger(e)
if (e.isDefaultPrevented()) return
$tip.removeClass('in')
$.support.transition && $tip.hasClass('fade') ?
$tip
.one('bsTransitionEnd', complete)
.emulateTransitionEnd(Tooltip.TRANSITION_DURATION) :
complete()
this.hoverState = null
return this
}
Tooltip.prototype.fixTitle = function () {
var $e = this.$element
if ($e.attr('title') || typeof $e.attr('data-original-title') != 'string') {
$e.attr('data-original-title', $e.attr('title') || '').attr('title', '')
}
}
Tooltip.prototype.hasContent = function () {
return this.getTitle()
}
Tooltip.prototype.getPosition = function ($element) {
$element = $element || this.$element
var el = $element[0]
var isBody = el.tagName == 'BODY'
var elRect = el.getBoundingClientRect()
if (elRect.width == null) {
// width and height are missing in IE8, so compute them manually; see https://github.com/twbs/bootstrap/issues/14093
elRect = $.extend({}, elRect, { width: elRect.right - elRect.left, height: elRect.bottom - elRect.top })
}
var isSvg = window.SVGElement && el instanceof window.SVGElement
// Avoid using $.offset() on SVGs since it gives incorrect results in jQuery 3.
// See https://github.com/twbs/bootstrap/issues/20280
var elOffset = isBody ? { top: 0, left: 0 } : (isSvg ? null : $element.offset())
var scroll = { scroll: isBody ? document.documentElement.scrollTop || document.body.scrollTop : $element.scrollTop() }
var outerDims = isBody ? { width: $(window).width(), height: $(window).height() } : null
return $.extend({}, elRect, scroll, outerDims, elOffset)
}
Tooltip.prototype.getCalculatedOffset = function (placement, pos, actualWidth, actualHeight) {
return placement == 'bottom' ? { top: pos.top + pos.height, left: pos.left + pos.width / 2 - actualWidth / 2 } :
placement == 'top' ? { top: pos.top - actualHeight, left: pos.left + pos.width / 2 - actualWidth / 2 } :
placement == 'left' ? { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left - actualWidth } :
/* placement == 'right' */ { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left + pos.width }
}
Tooltip.prototype.getViewportAdjustedDelta = function (placement, pos, actualWidth, actualHeight) {
var delta = { top: 0, left: 0 }
if (!this.$viewport) return delta
var viewportPadding = this.options.viewport && this.options.viewport.padding || 0
var viewportDimensions = this.getPosition(this.$viewport)
if (/right|left/.test(placement)) {
var topEdgeOffset = pos.top - viewportPadding - viewportDimensions.scroll
var bottomEdgeOffset = pos.top + viewportPadding - viewportDimensions.scroll + actualHeight
if (topEdgeOffset < viewportDimensions.top) { // top overflow
delta.top = viewportDimensions.top - topEdgeOffset
} else if (bottomEdgeOffset > viewportDimensions.top + viewportDimensions.height) { // bottom overflow
delta.top = viewportDimensions.top + viewportDimensions.height - bottomEdgeOffset
}
} else {
var leftEdgeOffset = pos.left - viewportPadding
var rightEdgeOffset = pos.left + viewportPadding + actualWidth
if (leftEdgeOffset < viewportDimensions.left) { // left overflow
delta.left = viewportDimensions.left - leftEdgeOffset
} else if (rightEdgeOffset > viewportDimensions.right) { // right overflow
delta.left = viewportDimensions.left + viewportDimensions.width - rightEdgeOffset
}
}
return delta
}
Tooltip.prototype.getTitle = function () {
var title
var $e = this.$element
var o = this.options
title = $e.attr('data-original-title')
|| (typeof o.title == 'function' ? o.title.call($e[0]) : o.title)
return title
}
Tooltip.prototype.getUID = function (prefix) {
do prefix += ~~(Math.random() * 1000000)
while (document.getElementById(prefix))
return prefix
}
Tooltip.prototype.tip = function () {
if (!this.$tip) {
this.$tip = $(this.options.template)
if (this.$tip.length != 1) {
throw new Error(this.type + ' `template` option must consist of exactly 1 top-level element!')
}
}
return this.$tip
}
Tooltip.prototype.arrow = function () {
return (this.$arrow = this.$arrow || this.tip().find('.tooltip-arrow'))
}
Tooltip.prototype.enable = function () {
this.enabled = true
}
Tooltip.prototype.disable = function () {
this.enabled = false
}
Tooltip.prototype.toggleEnabled = function () {
this.enabled = !this.enabled
}
Tooltip.prototype.toggle = function (e) {
var self = this
if (e) {
self = $(e.currentTarget).data('bs.' + this.type)
if (!self) {
self = new this.constructor(e.currentTarget, this.getDelegateOptions())
$(e.currentTarget).data('bs.' + this.type, self)
}
}
if (e) {
self.inState.click = !self.inState.click
if (self.isInStateTrue()) self.enter(self)
else self.leave(self)
} else {
self.tip().hasClass('in') ? self.leave(self) : self.enter(self)
}
}
Tooltip.prototype.destroy = function () {
var that = this
clearTimeout(this.timeout)
this.hide(function () {
that.$element.off('.' + that.type).removeData('bs.' + that.type)
if (that.$tip) {
that.$tip.detach()
}
that.$tip = null
that.$arrow = null
that.$viewport = null
that.$element = null
})
}
Tooltip.prototype.sanitizeHtml = function (unsafeHtml) {
return sanitizeHtml(unsafeHtml, this.options.whiteList, this.options.sanitizeFn)
}
// TOOLTIP PLUGIN DEFINITION
// =========================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.tooltip')
var options = typeof option == 'object' && option
if (!data && /destroy|hide/.test(option)) return
if (!data) $this.data('bs.tooltip', (data = new Tooltip(this, options)))
if (typeof option == 'string') data[option]()
})
}
var old = $.fn.tooltip
$.fn.tooltip = Plugin
$.fn.tooltip.Constructor = Tooltip
// TOOLTIP NO CONFLICT
// ===================
$.fn.tooltip.noConflict = function () {
$.fn.tooltip = old
return this
}
}(jQuery);
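/*
 * Usage sketch (not part of the original Bootstrap 3.4.1 source; the selectors are
 * hypothetical). Programmatic API of the Tooltip plugin defined above:
 *
 *   $('[data-toggle="tooltip"]').tooltip({
 *     placement: 'auto top',
 *     container: 'body',
 *     delay: { show: 200, hide: 100 },
 *     html: false            // when true, title markup is passed through sanitizeHtml
 *   })
 *   $('#myLink').tooltip('show')
 *   $('#myLink').tooltip('destroy')
 *
 * Unlike the other plugins, tooltips have no data-API auto-initialization; they must
 * be opted into explicitly as above.
 */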
/* ========================================================================
* Bootstrap: popover.js v3.4.1
* https://getbootstrap.com/docs/3.4/javascript/#popovers
* ========================================================================
* Copyright 2011-2019 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// POPOVER PUBLIC CLASS DEFINITION
// ===============================
var Popover = function (element, options) {
this.init('popover', element, options)
}
if (!$.fn.tooltip) throw new Error('Popover requires tooltip.js')
Popover.VERSION = '3.4.1'
Popover.DEFAULTS = $.extend({}, $.fn.tooltip.Constructor.DEFAULTS, {
placement: 'right',
trigger: 'click',
content: '',
template: '<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'
})
// NOTE: POPOVER EXTENDS tooltip.js
// ================================
Popover.prototype = $.extend({}, $.fn.tooltip.Constructor.prototype)
Popover.prototype.constructor = Popover
Popover.prototype.getDefaults = function () {
return Popover.DEFAULTS
}
Popover.prototype.setContent = function () {
var $tip = this.tip()
var title = this.getTitle()
var content = this.getContent()
if (this.options.html) {
var typeContent = typeof content
if (this.options.sanitize) {
title = this.sanitizeHtml(title)
if (typeContent === 'string') {
content = this.sanitizeHtml(content)
}
}
$tip.find('.popover-title').html(title)
$tip.find('.popover-content').children().detach().end()[
typeContent === 'string' ? 'html' : 'append'
](content)
} else {
$tip.find('.popover-title').text(title)
$tip.find('.popover-content').children().detach().end().text(content)
}
$tip.removeClass('fade top bottom left right in')
// IE8 doesn't accept hiding via the `:empty` pseudo selector, we have to do
// this manually by checking the contents.
if (!$tip.find('.popover-title').html()) $tip.find('.popover-title').hide()
}
Popover.prototype.hasContent = function () {
return this.getTitle() || this.getContent()
}
Popover.prototype.getContent = function () {
var $e = this.$element
var o = this.options
return $e.attr('data-content')
|| (typeof o.content == 'function' ?
o.content.call($e[0]) :
o.content)
}
Popover.prototype.arrow = function () {
return (this.$arrow = this.$arrow || this.tip().find('.arrow'))
}
// POPOVER PLUGIN DEFINITION
// =========================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.popover')
var options = typeof option == 'object' && option
if (!data && /destroy|hide/.test(option)) return
if (!data) $this.data('bs.popover', (data = new Popover(this, options)))
if (typeof option == 'string') data[option]()
})
}
var old = $.fn.popover
$.fn.popover = Plugin
$.fn.popover.Constructor = Popover
// POPOVER NO CONFLICT
// ===================
$.fn.popover.noConflict = function () {
$.fn.popover = old
return this
}
}(jQuery);
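/*
 * Usage sketch (not part of the original Bootstrap 3.4.1 source; the selectors are
 * hypothetical). Popover extends Tooltip, so it shares its options and adds `content`:
 *
 *   $('[data-toggle="popover"]').popover({
 *     placement: 'right',
 *     trigger: 'click',
 *     title: 'Popover title',
 *     content: 'Popover body text'
 *   })
 *   $('#myButton').popover('toggle')
 *
 * Like tooltips, popovers are opt-in and have no data-API auto-initialization.
 */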
/* ========================================================================
* Bootstrap: scrollspy.js v3.4.1
* https://getbootstrap.com/docs/3.4/javascript/#scrollspy
* ========================================================================
* Copyright 2011-2019 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// SCROLLSPY CLASS DEFINITION
// ==========================
function ScrollSpy(element, options) {
this.$body = $(document.body)
this.$scrollElement = $(element).is(document.body) ? $(window) : $(element)
this.options = $.extend({}, ScrollSpy.DEFAULTS, options)
this.selector = (this.options.target || '') + ' .nav li > a'
this.offsets = []
this.targets = []
this.activeTarget = null
this.scrollHeight = 0
this.$scrollElement.on('scroll.bs.scrollspy', $.proxy(this.process, this))
this.refresh()
this.process()
}
ScrollSpy.VERSION = '3.4.1'
ScrollSpy.DEFAULTS = {
offset: 10
}
ScrollSpy.prototype.getScrollHeight = function () {
return this.$scrollElement[0].scrollHeight || Math.max(this.$body[0].scrollHeight, document.documentElement.scrollHeight)
}
ScrollSpy.prototype.refresh = function () {
var that = this
var offsetMethod = 'offset'
var offsetBase = 0
this.offsets = []
this.targets = []
this.scrollHeight = this.getScrollHeight()
if (!$.isWindow(this.$scrollElement[0])) {
offsetMethod = 'position'
offsetBase = this.$scrollElement.scrollTop()
}
this.$body
.find(this.selector)
.map(function () {
var $el = $(this)
var href = $el.data('target') || $el.attr('href')
var $href = /^#./.test(href) && $(href)
return ($href
&& $href.length
&& $href.is(':visible')
&& [[$href[offsetMethod]().top + offsetBase, href]]) || null
})
.sort(function (a, b) { return a[0] - b[0] })
.each(function () {
that.offsets.push(this[0])
that.targets.push(this[1])
})
}
ScrollSpy.prototype.process = function () {
var scrollTop = this.$scrollElement.scrollTop() + this.options.offset
var scrollHeight = this.getScrollHeight()
var maxScroll = this.options.offset + scrollHeight - this.$scrollElement.height()
var offsets = this.offsets
var targets = this.targets
var activeTarget = this.activeTarget
var i
if (this.scrollHeight != scrollHeight) {
this.refresh()
}
if (scrollTop >= maxScroll) {
return activeTarget != (i = targets[targets.length - 1]) && this.activate(i)
}
if (activeTarget && scrollTop < offsets[0]) {
this.activeTarget = null
return this.clear()
}
for (i = offsets.length; i--;) {
activeTarget != targets[i]
&& scrollTop >= offsets[i]
&& (offsets[i + 1] === undefined || scrollTop < offsets[i + 1])
&& this.activate(targets[i])
}
}
ScrollSpy.prototype.activate = function (target) {
this.activeTarget = target
this.clear()
var selector = this.selector +
'[data-target="' + target + '"],' +
this.selector + '[href="' + target + '"]'
var active = $(selector)
.parents('li')
.addClass('active')
if (active.parent('.dropdown-menu').length) {
active = active
.closest('li.dropdown')
.addClass('active')
}
active.trigger('activate.bs.scrollspy')
}
ScrollSpy.prototype.clear = function () {
$(this.selector)
.parentsUntil(this.options.target, '.active')
.removeClass('active')
}
// SCROLLSPY PLUGIN DEFINITION
// ===========================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.scrollspy')
var options = typeof option == 'object' && option
if (!data) $this.data('bs.scrollspy', (data = new ScrollSpy(this, options)))
if (typeof option == 'string') data[option]()
})
}
var old = $.fn.scrollspy
$.fn.scrollspy = Plugin
$.fn.scrollspy.Constructor = ScrollSpy
// SCROLLSPY NO CONFLICT
// =====================
$.fn.scrollspy.noConflict = function () {
$.fn.scrollspy = old
return this
}
// SCROLLSPY DATA-API
// ==================
$(window).on('load.bs.scrollspy.data-api', function () {
$('[data-spy="scroll"]').each(function () {
var $spy = $(this)
Plugin.call($spy, $spy.data())
})
})
}(jQuery);
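// Usage sketch (selectors below are illustrative assumptions): scrollspy can be activated via
// the data API above (data-spy="scroll" on the scrolled element) or programmatically.
//
//   $('body').scrollspy({ target: '#main-navbar', offset: 10 })
//   $('[data-spy="scroll"]').scrollspy('refresh')   // recompute offsets after DOM changes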
/* ========================================================================
* Bootstrap: tab.js v3.4.1
* https://getbootstrap.com/docs/3.4/javascript/#tabs
* ========================================================================
* Copyright 2011-2019 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// TAB CLASS DEFINITION
// ====================
var Tab = function (element) {
// jscs:disable requireDollarBeforejQueryAssignment
this.element = $(element)
// jscs:enable requireDollarBeforejQueryAssignment
}
Tab.VERSION = '3.4.1'
Tab.TRANSITION_DURATION = 150
Tab.prototype.show = function () {
var $this = this.element
var $ul = $this.closest('ul:not(.dropdown-menu)')
var selector = $this.data('target')
if (!selector) {
selector = $this.attr('href')
selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7
}
if ($this.parent('li').hasClass('active')) return
var $previous = $ul.find('.active:last a')
var hideEvent = $.Event('hide.bs.tab', {
relatedTarget: $this[0]
})
var showEvent = $.Event('show.bs.tab', {
relatedTarget: $previous[0]
})
$previous.trigger(hideEvent)
$this.trigger(showEvent)
if (showEvent.isDefaultPrevented() || hideEvent.isDefaultPrevented()) return
var $target = $(document).find(selector)
this.activate($this.closest('li'), $ul)
this.activate($target, $target.parent(), function () {
$previous.trigger({
type: 'hidden.bs.tab',
relatedTarget: $this[0]
})
$this.trigger({
type: 'shown.bs.tab',
relatedTarget: $previous[0]
})
})
}
Tab.prototype.activate = function (element, container, callback) {
var $active = container.find('> .active')
var transition = callback
&& $.support.transition
&& ($active.length && $active.hasClass('fade') || !!container.find('> .fade').length)
function next() {
$active
.removeClass('active')
.find('> .dropdown-menu > .active')
.removeClass('active')
.end()
.find('[data-toggle="tab"]')
.attr('aria-expanded', false)
element
.addClass('active')
.find('[data-toggle="tab"]')
.attr('aria-expanded', true)
if (transition) {
element[0].offsetWidth // reflow for transition
element.addClass('in')
} else {
element.removeClass('fade')
}
if (element.parent('.dropdown-menu').length) {
element
.closest('li.dropdown')
.addClass('active')
.end()
.find('[data-toggle="tab"]')
.attr('aria-expanded', true)
}
callback && callback()
}
$active.length && transition ?
$active
.one('bsTransitionEnd', next)
.emulateTransitionEnd(Tab.TRANSITION_DURATION) :
next()
$active.removeClass('in')
}
// TAB PLUGIN DEFINITION
// =====================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.tab')
if (!data) $this.data('bs.tab', (data = new Tab(this)))
if (typeof option == 'string') data[option]()
})
}
var old = $.fn.tab
$.fn.tab = Plugin
$.fn.tab.Constructor = Tab
// TAB NO CONFLICT
// ===============
$.fn.tab.noConflict = function () {
$.fn.tab = old
return this
}
// TAB DATA-API
// ============
var clickHandler = function (e) {
e.preventDefault()
Plugin.call($(this), 'show')
}
$(document)
.on('click.bs.tab.data-api', '[data-toggle="tab"]', clickHandler)
.on('click.bs.tab.data-api', '[data-toggle="pill"]', clickHandler)
}(jQuery);
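// Usage sketch (selectors below are illustrative assumptions): tabs are usually driven by the
// data API above (data-toggle="tab"), but can also be activated programmatically.
//
//   $('#myTabs a[href="#profile"]').tab('show')   // activate the tab linking to #profile
//   $('#myTabs a:first').tab('show')              // activate the first tab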
/* ========================================================================
* Bootstrap: affix.js v3.4.1
* https://getbootstrap.com/docs/3.4/javascript/#affix
* ========================================================================
* Copyright 2011-2019 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// AFFIX CLASS DEFINITION
// ======================
var Affix = function (element, options) {
this.options = $.extend({}, Affix.DEFAULTS, options)
var target = this.options.target === Affix.DEFAULTS.target ? $(this.options.target) : $(document).find(this.options.target)
this.$target = target
.on('scroll.bs.affix.data-api', $.proxy(this.checkPosition, this))
.on('click.bs.affix.data-api', $.proxy(this.checkPositionWithEventLoop, this))
this.$element = $(element)
this.affixed = null
this.unpin = null
this.pinnedOffset = null
this.checkPosition()
}
Affix.VERSION = '3.4.1'
Affix.RESET = 'affix affix-top affix-bottom'
Affix.DEFAULTS = {
offset: 0,
target: window
}
Affix.prototype.getState = function (scrollHeight, height, offsetTop, offsetBottom) {
var scrollTop = this.$target.scrollTop()
var position = this.$element.offset()
var targetHeight = this.$target.height()
if (offsetTop != null && this.affixed == 'top') return scrollTop < offsetTop ? 'top' : false
if (this.affixed == 'bottom') {
if (offsetTop != null) return (scrollTop + this.unpin <= position.top) ? false : 'bottom'
return (scrollTop + targetHeight <= scrollHeight - offsetBottom) ? false : 'bottom'
}
var initializing = this.affixed == null
var colliderTop = initializing ? scrollTop : position.top
var colliderHeight = initializing ? targetHeight : height
if (offsetTop != null && scrollTop <= offsetTop) return 'top'
if (offsetBottom != null && (colliderTop + colliderHeight >= scrollHeight - offsetBottom)) return 'bottom'
return false
}
Affix.prototype.getPinnedOffset = function () {
if (this.pinnedOffset) return this.pinnedOffset
this.$element.removeClass(Affix.RESET).addClass('affix')
var scrollTop = this.$target.scrollTop()
var position = this.$element.offset()
return (this.pinnedOffset = position.top - scrollTop)
}
Affix.prototype.checkPositionWithEventLoop = function () {
setTimeout($.proxy(this.checkPosition, this), 1)
}
Affix.prototype.checkPosition = function () {
if (!this.$element.is(':visible')) return
var height = this.$element.height()
var offset = this.options.offset
var offsetTop = offset.top
var offsetBottom = offset.bottom
var scrollHeight = Math.max($(document).height(), $(document.body).height())
if (typeof offset != 'object') offsetBottom = offsetTop = offset
if (typeof offsetTop == 'function') offsetTop = offset.top(this.$element)
if (typeof offsetBottom == 'function') offsetBottom = offset.bottom(this.$element)
var affix = this.getState(scrollHeight, height, offsetTop, offsetBottom)
if (this.affixed != affix) {
if (this.unpin != null) this.$element.css('top', '')
var affixType = 'affix' + (affix ? '-' + affix : '')
var e = $.Event(affixType + '.bs.affix')
this.$element.trigger(e)
if (e.isDefaultPrevented()) return
this.affixed = affix
this.unpin = affix == 'bottom' ? this.getPinnedOffset() : null
this.$element
.removeClass(Affix.RESET)
.addClass(affixType)
.trigger(affixType.replace('affix', 'affixed') + '.bs.affix')
}
if (affix == 'bottom') {
this.$element.offset({
top: scrollHeight - height - offsetBottom
})
}
}
// AFFIX PLUGIN DEFINITION
// =======================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.affix')
var options = typeof option == 'object' && option
if (!data) $this.data('bs.affix', (data = new Affix(this, options)))
if (typeof option == 'string') data[option]()
})
}
var old = $.fn.affix
$.fn.affix = Plugin
$.fn.affix.Constructor = Affix
// AFFIX NO CONFLICT
// =================
$.fn.affix.noConflict = function () {
$.fn.affix = old
return this
}
// AFFIX DATA-API
// ==============
$(window).on('load', function () {
$('[data-spy="affix"]').each(function () {
var $spy = $(this)
var data = $spy.data()
data.offset = data.offset || {}
if (data.offsetBottom != null) data.offset.bottom = data.offsetBottom
if (data.offsetTop != null) data.offset.top = data.offsetTop
Plugin.call($spy, data)
})
})
}(jQuery);
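// Usage sketch (selector and offsets are illustrative assumptions): affix can be configured via
// data attributes (data-spy="affix" data-offset-top="60" data-offset-bottom="200"), handled by
// the data API above, or programmatically.
//
//   $('#sidebar').affix({ offset: { top: 60, bottom: 200 } })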
/MAZAlib-0.0.24.tar.gz/MAZAlib-0.0.24/README.md

# MAZAlib
Cross-platform 2d/3d image segmentation C++ library
Compiles with: MSVC 14.0 and later, GCC 9.2. Other GCC versions: not tested yet.
Compatible with C++14; C++11 not tested.
Authors: Roman V. Vasilyev, Timofey Sizonenko, Kirill M. Gerke, Marina V. Karsanina
Moscow, 2017-2021
## Prerequisites
1. Install CMake 3.13 or later. (Or you may lower the version requirement by hand to your actual version in all CMakeLists.txt files and hopefully it will work, just not tested yet).
Linux (Ubuntu): sudo apt-get install cmake
Windows: https://cmake.org/download/, add cmake into PATH system variable during installation
2. Install a modern C++ compiler.
Linux (Ubuntu): sudo apt-get install g++
Windows: Visual Studio 2015 or later with C++ tools installed
3. Optionally, for GPU-accelerated non-local means denoising: a CUDA-enabled GPU and the CUDA toolkit installed
## Building
1. (Optionally) make a build subdirectory and move there, for example "build_release"
2. Run cmake <relative path to project>, for example "cmake .." inside the build directory. If you're building the project with CUDA support, you can specify the compute capability of your GPU (to find it, check the NVIDIA web site). For example, GeForce GTX 1080 Ti has compute capability 6.1, so use the following command: "cmake -DCMAKE_CUDA_FLAGS="-arch=sm_61" .."
3. If you have CUDA compiler and NVIDIA GPU, the project automatically configures itself to use GPU for NLM denoising. Otherwise, CPU host code will be used
4. Then, under Linux, just run "make"; under Windows, open the generated solution file in Visual Studio and build the "segmentation_test_exe" project. The library and "segmentation_probny_loshar.exe" will be compiled and built
5. Run segmentation_probny_loshar from the console to check that everything is OK
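
For reference, a typical Linux build from the repository root might look like the snippet below (the Linux binary name is assumed to match the Windows one without the ".exe" suffix):

```
mkdir build_release
cd build_release
cmake ..    # or, with CUDA: cmake -DCMAKE_CUDA_FLAGS="-arch=sm_61" ..
make
./segmentation_probny_loshar
```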
## Config file structure
```
width height depth
radius VarMethod CorMethod OutFormat
LowThreshold HighThreshold
binary_input_file_name
```
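
A purely illustrative example (all values are hypothetical; consult the library sources for the valid variance/correlation method and output format codes):

```
300 300 300
2 0 0 0
110 180
input_volume.raw
```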
| PypiClean |
/Hive_ML-1.0.1.tar.gz/Hive_ML-1.0.1/Hive_ML_scripts/Hive_ML_model_fitting.py
import datetime
import importlib.resources
import json
import os
import warnings
from argparse import ArgumentParser, RawTextHelpFormatter
from pathlib import Path
from textwrap import dedent
import numpy as np
import pandas as pd
import plotly.express as px
from Hive.utils.log_utils import (
get_logger,
add_verbosity_options_to_argparser,
log_lvl_from_verbosity_args,
)
from joblib import parallel_backend
warnings.simplefilter(action='ignore', category=FutureWarning)
from tqdm.notebook import tqdm
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from tqdm import tqdm
from sklearn.decomposition import PCA
import Hive_ML.configs
from Hive_ML.data_loader.feature_loader import load_feature_set
from Hive_ML.training.model_trainer import model_fit_and_predict
from Hive_ML.training.models import adab_tree, random_forest, knn, decicion_tree, lda, qda, naive, svm_kernel, \
logistic_regression, ridge, mlp
from Hive_ML.utilities.feature_utils import data_shuffling, feature_normalization, prepare_features
from Hive_ML.evaluation.model_evaluation import select_best_classifiers, evaluate_classifiers
TIMESTAMP = "{:%Y-%m-%d_%H-%M-%S}".format(datetime.datetime.now())
COMPOSED_METRICS = {
"sensitivity": lambda x: x["1"]["recall"],
"specificity": lambda x: x["0"]["recall"]
}
MODELS = {
"rf": random_forest,
"adab": adab_tree,
"lda": lda,
"qda": qda,
"logistic_regression": logistic_regression,
"knn": knn,
"naive": naive,
"decision_tree": decicion_tree,
"svm": svm_kernel,
"ridge": ridge,
"mlp": mlp
}
DESC = dedent(
"""
Script to run 5-CV Forward Model Fitting (after performing Feature Selection) on a Feature Set. The Metrics evaluation
summary (in Excel format) is saved in the experiment folder, defined by the ``experiment_name`` argument.
""" # noqa: E501
)
EPILOG = dedent(
"""
Example call:
::
{filename} -feature-file /path/to/feature_table.csv --config-file config_file.json --experiment-name Radiomics
""".format( # noqa: E501
filename=Path(__file__).name
)
)
import warnings
warnings.filterwarnings("ignore")
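# Expected configuration keys, as read by this script below (listed here for readability):
#   "models"                - dict mapping classifier short names (see MODELS) to their kwargs
#   "n_folds"               - number of stratified CV folds
#   "n_features"            - maximum number of features for the forward selection
#   "random_seed"           - seed used for shuffling, CV splits and model construction
#   "feature_selection"     - "SFFS" or "PCA"
#   "feature_aggregator"    - optional: "Flat", "Mean", "SD", "Sum", "Delta" or a "*Norm" variant
#   "label_dict"            - mapping of class ids to class names (used for reports/plots)
#   "metric_best_model", "reduction_best_model", "k_ensemble" - best-model/ensemble selection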
def get_arg_parser():
pars = ArgumentParser(description=DESC, epilog=EPILOG, formatter_class=RawTextHelpFormatter)
pars.add_argument(
"--feature-file",
type=str,
required=True,
help="Input Dataset folder",
)
pars.add_argument(
"--config-file",
type=str,
required=True,
help="Configuration JSON file with experiment and dataset parameters.",
)
pars.add_argument(
"--experiment-name",
type=str,
required=True,
help="Experiment name used to save the model fitting metrics evaluation summary.",
)
add_verbosity_options_to_argparser(pars)
return pars
def main():
parser = get_arg_parser()
arguments = vars(parser.parse_args())
logger = get_logger(
name=Path(__file__).name,
level=log_lvl_from_verbosity_args(arguments),
)
try:
with open(arguments["config_file"]) as json_file:
config_dict = json.load(json_file)
except FileNotFoundError:
with importlib.resources.path(Hive_ML.configs, arguments["config_file"]) as json_path:
with open(json_path) as json_file:
config_dict = json.load(json_file)
models = config_dict["models"]
metrics = ["accuracy", "roc_auc", "specificity", "sensitivity"]
aggregation = "Flat"
stats_4D = False
flatten_features = True
if "feature_aggregator" in config_dict:
aggregation = config_dict["feature_aggregator"]
if aggregation != "Flat":
stats_4D = True
flatten_features = False
elif aggregation.endswith("Norm"):
stats_4D = False
flatten_features = False
feature_set, subject_ids, subject_labels, feature_names, mean_features, sum_features, std_features, mean_delta_features = load_feature_set(
arguments["feature_file"],
get_4D_stats=stats_4D,
flatten_features=flatten_features)
if aggregation == "Flat":
features = feature_set
elif aggregation == "Mean":
features = mean_features
elif aggregation == "SD":
features = std_features
elif aggregation == "Sum":
features = sum_features
elif aggregation == "Delta":
features = mean_delta_features
label_set = np.array(subject_labels)
if aggregation.endswith("Norm"):
features = feature_set
feature_set_3D = np.array(features).squeeze(-2)
train_feature_set, train_label_set, test_feature_set, test_label_set = data_shuffling(
np.swapaxes(feature_set_3D, 0, 1), label_set, config_dict["random_seed"])
else:
n_features = features.shape[1]
n_subjects = features.shape[0]
filtered_feature_set = []
filtered_feature_names = []
features = np.nan_to_num(features)
for feature in range(n_features):
exclude = False
for feature_val in np.unique(features[:, feature]):
if (np.count_nonzero(features[:, feature] == feature_val) / n_subjects) > 0.5:
exclude = True
print("Excluding:", feature_names[feature])
break
if not exclude:
filtered_feature_set.append(list(features[:, feature]))
filtered_feature_names.append(feature_names[feature])
feature_set = np.vstack(filtered_feature_set).T
feature_names = filtered_feature_names
print("# Features: {}".format(feature_set.shape[1]))
print("# Labels: {}".format(label_set.shape))
train_feature_set, train_label_set, test_feature_set, test_label_set = data_shuffling(feature_set, label_set,
config_dict[
"random_seed"])
experiment_name = arguments["experiment_name"]
experiment_dir = Path(os.environ["ROOT_FOLDER"]).joinpath(
experiment_name, config_dict["feature_selection"],
aggregation,
"FS")
experiment_dir.mkdir(parents=True, exist_ok=True)
n_features = config_dict["n_features"]
if n_features > train_feature_set.shape[1]:
n_features = train_feature_set.shape[1]
n_iterations = 0
for classifier in models:
if classifier in ["rf", "adab"]:
n_iterations += config_dict["n_folds"]
else:
n_iterations += config_dict["n_folds"] * n_features
if config_dict["feature_selection"] == "SFFS":
with open(Path(os.environ["ROOT_FOLDER"]).joinpath(
experiment_name,
config_dict["feature_selection"],
aggregation, "FS",
f"{experiment_name}_FS_summary.json"),
'rb') as fp:
feature_selection = json.load(fp)
pbar = tqdm(total=n_iterations)
df_summary = []
with parallel_backend('loky', n_jobs=-1):
for classifier in models:
pbar.set_description(f"{classifier}, Model Fitting")
if classifier in ["rf", "adab"]:
n_features = "All"
else:
n_features = config_dict["n_features"]
if n_features > train_feature_set.shape[-1]:
n_features = train_feature_set.shape[-1]
if n_features != "All":
for n_feature in range(1, n_features + 1):
kf = StratifiedKFold(n_splits=config_dict["n_folds"], random_state=config_dict["random_seed"],
shuffle=True)
for fold, (train_index, val_index) in enumerate(kf.split(train_feature_set, train_label_set)):
x_train, y_train, x_val, y_val = prepare_features(train_feature_set, train_label_set,
train_index, aggregation, val_index)
if config_dict["feature_selection"] == "SFFS":
selected_features = feature_selection[classifier][str(fold)][str(n_feature)][
"feature_names"]
train_feature_name_list = list(feature_names)
feature_idx = []
for selected_feature in selected_features:
feature_idx.append(train_feature_name_list.index(selected_feature))
x_train = x_train[:, feature_idx]
x_val = x_val[:, feature_idx]
elif config_dict["feature_selection"] == "PCA":
pca = PCA(n_components=n_features)
x_train = pca.fit_transform(x_train)
x_val = pca.transform(x_val)
x_train, x_val, _ = feature_normalization(x_train, x_val)
clf = MODELS[classifier](**models[classifier], random_state=config_dict["random_seed"])
y_val_pred = model_fit_and_predict(clf, x_train, y_train, x_val)
roc_auc_val = roc_auc_score(y_val, y_val_pred[:, 1])
report = classification_report(y_val,
np.where(y_val_pred[:, 1] > 0.5, 1, 0), output_dict=True)
report["roc_auc"] = roc_auc_val
for metric in metrics:
if metric not in report:
report[metric] = COMPOSED_METRICS[metric](report)
df_summary.append(
{"Value": report[metric], "Classifier": classifier, "Metric": metric,
"Fold": str(fold),
"N_Features": n_feature,
"Experiment": experiment_name + "_" + config_dict[
"feature_selection"] + "_" + aggregation
})
pbar.update(1)
else:
kf = StratifiedKFold(n_splits=config_dict["n_folds"], random_state=config_dict["random_seed"],
shuffle=True)
for fold, (train_index, val_index) in enumerate(kf.split(train_feature_set, train_label_set)):
x_train, y_train, x_val, y_val = prepare_features(train_feature_set, train_label_set, train_index,
aggregation, val_index)
x_train, x_val, _ = feature_normalization(x_train, x_val)
clf = MODELS[classifier](**models[classifier], random_state=config_dict["random_seed"])
y_val_pred = model_fit_and_predict(clf, x_train, y_train, x_val)
roc_auc_val = roc_auc_score(y_val, y_val_pred[:, 1])
report = classification_report(y_val,
np.where(y_val_pred[:, 1] > 0.5, 1, 0), output_dict=True)
report["roc_auc"] = roc_auc_val
for metric in metrics:
if metric not in report:
report[metric] = COMPOSED_METRICS[metric](report)
df_summary.append(
{"Value": report[metric], "Classifier": classifier, "Metric": metric,
"Fold": str(fold),
"N_Features": "All",
"Experiment": experiment_name + "_" + config_dict["feature_selection"] + "_" + aggregation
})
pbar.update(1)
df_summary = pd.DataFrame.from_records(df_summary)
feature_selection_method = config_dict["feature_selection"]
df_summary.to_excel(Path(os.environ["ROOT_FOLDER"]).joinpath(
experiment_name, experiment_name + "_" + feature_selection_method + f"_{aggregation}.xlsx"))
df_summary_all = df_summary[df_summary["N_Features"] == "All"]
df_summary_all = df_summary_all.drop(["Fold"], axis=1)
df_summary = df_summary[df_summary["N_Features"] != "All"]
df_summary = df_summary[df_summary["N_Features"] <= 15]
df_summary = df_summary.drop(["Fold"], axis=1)
df_summary = pd.concat([df_summary, df_summary_all])
visualizers = {
"Report": {"support": True,
"classes": [config_dict["label_dict"][key] for key in config_dict["label_dict"]]},
"ROCAUC": {"micro": False, "macro": False, "per_class": False,
"classes": [config_dict["label_dict"][key] for key in config_dict["label_dict"]]},
"PR": {},
"CPE": {"classes": [config_dict["label_dict"][key] for key in config_dict["label_dict"]]},
"DT": {}
}
metric = config_dict["metric_best_model"]
reduction = config_dict["reduction_best_model"]
plot_title = f"{experiment_name} SFFS {aggregation}"
val_scores = []
features_classifiers, scores = select_best_classifiers(df_summary, metric, reduction, 1)
val_scores.append(
{"Metric": metric,
"Experiment": experiment_name,
"Score": scores[0], "Section": f"Validation Set [5-CV {reduction.capitalize()}]"},
)
for k in config_dict["k_ensemble"]:
features_classifiers, scores = select_best_classifiers(df_summary, metric, reduction, k)
classifiers = [classifier for n_features, classifier in features_classifiers]
n_feature_list = [n_features for n_features, classifier in features_classifiers]
classifier_kwargs_list = [models[classifier] for classifier in classifiers]
ensemble_weights = scores
ensemble_configuration_df = []
for classifier, n_features, weight in zip(classifiers, n_feature_list, ensemble_weights):
ensemble_configuration_df.append({"Classifier": classifier,
"N_Features": n_features,
"weight": weight})
ensemble_configuration = pd.DataFrame.from_records(ensemble_configuration_df)
print(ensemble_configuration)
output_file = str(Path(os.environ["ROOT_FOLDER"]).joinpath(
experiment_name,
f"{experiment_name} {feature_selection_method} {aggregation} {reduction}_{k}.png"))
report = evaluate_classifiers(ensemble_configuration, classifier_kwargs_list,
train_feature_set, train_label_set, test_feature_set, test_label_set,
aggregation, feature_selection, visualizers, output_file, plot_title,
config_dict["random_seed"])
roc_auc_val = report[metric]
val_scores.append(
{"Metric": metric,
"Experiment": experiment_name,
"Score": roc_auc_val, "Section": f"Test Set [k={k}]"})
val_scores = pd.DataFrame.from_records(val_scores)
val_scores.to_excel(Path(os.environ["ROOT_FOLDER"]).joinpath(experiment_name, f"{plot_title}.xlsx"))
fig = px.bar(val_scores, x='Section', y='Score', color="Experiment", text_auto=True, title=plot_title,
barmode='group')
fig.write_image(Path(os.environ["ROOT_FOLDER"]).joinpath(experiment_name, f"{plot_title}.svg"))
if __name__ == "__main__":
    main()
/CNFgen-0.9.2-py3-none-any.whl/cnfgen/__init__.py

# Basic CNF object
from cnfgen.formula.cnf import CNF
# Graph IO functions
from cnfgen.graphs import readGraph, writeGraph
from cnfgen.graphs import supported_graph_formats
from cnfgen.graphs import Graph
from cnfgen.graphs import DirectedGraph
from cnfgen.graphs import BipartiteGraph
# SAT solvers
from cnfgen.utils.solver import supported_satsolvers
from cnfgen.utils.solver import some_solver_installed
# Formula families implemented
from cnfgen.families.cliquecoloring import CliqueColoring
from cnfgen.families.coloring import GraphColoringFormula
from cnfgen.families.coloring import EvenColoringFormula
from cnfgen.families.counting import CountingPrinciple
from cnfgen.families.counting import PerfectMatchingPrinciple
from cnfgen.families.dominatingset import DominatingSet
from cnfgen.families.dominatingset import Tiling
from cnfgen.families.graphisomorphism import GraphIsomorphism
from cnfgen.families.graphisomorphism import GraphAutomorphism
from cnfgen.families.ordering import OrderingPrinciple
from cnfgen.families.ordering import GraphOrderingPrinciple
from cnfgen.families.pebbling import PebblingFormula
from cnfgen.families.pebbling import StoneFormula
from cnfgen.families.pebbling import SparseStoneFormula
from cnfgen.families.pigeonhole import PigeonholePrinciple
from cnfgen.families.pigeonhole import GraphPigeonholePrinciple
from cnfgen.families.pigeonhole import BinaryPigeonholePrinciple
from cnfgen.families.pigeonhole import RelativizedPigeonholePrinciple
from cnfgen.families.ramsey import RamseyNumber
from cnfgen.families.ramsey import PythagoreanTriples
from cnfgen.families.ramsey import VanDerWaerden
from cnfgen.families.randomformulas import RandomKCNF
from cnfgen.families.randomkxor import RandomKXOR
from cnfgen.families.subgraph import SubgraphFormula
from cnfgen.families.subgraph import CliqueFormula
from cnfgen.families.subgraph import BinaryCliqueFormula
from cnfgen.families.subgraph import RamseyWitnessFormula
from cnfgen.families.subsetcardinality import SubsetCardinalityFormula
from cnfgen.families.tseitin import TseitinFormula
from cnfgen.families.pitfall import PitfallFormula
from cnfgen.families.cpls import CPLSFormula
# Formula transformation implemented
from cnfgen.transformations.substitutions import AllEqualSubstitution
from cnfgen.transformations.substitutions import ExactlyOneSubstitution
from cnfgen.transformations.substitutions import ExactlyKSubstitution
from cnfgen.transformations.substitutions import AnythingButKSubstitution
from cnfgen.transformations.substitutions import AtLeastKSubstitution
from cnfgen.transformations.substitutions import AtMostKSubstitution
from cnfgen.transformations.substitutions import FlipPolarity
from cnfgen.transformations.substitutions import FormulaLifting
from cnfgen.transformations.substitutions import IfThenElseSubstitution
from cnfgen.transformations.substitutions import MajoritySubstitution
from cnfgen.transformations.substitutions import NotAllEqualSubstitution
from cnfgen.transformations.substitutions import OrSubstitution
from cnfgen.transformations.substitutions import VariableCompression
from cnfgen.transformations.substitutions import XorSubstitution
from cnfgen.transformations.shuffle import Shuffle
# Main Command Line tool
from cnfgen.clitools import cnfgen
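# Usage sketch (only formula construction is shown, because export and solving APIs vary
# between cnfgen versions; the signature below is assumed to be (pigeons, holes)):
#
#   from cnfgen import PigeonholePrinciple
#   F = PigeonholePrinciple(5, 4)
#   # F is a CNF instance; see the CNF class for DIMACS export and SAT-solver helpers.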
/LiteMap-0.2.1.tar.gz/LiteMap-0.2.1/litemap.py
import sqlite3
import collections
import threading
__all__ = ['LiteMap']
class LiteMap(collections.MutableMapping):
"""Persistant mapping class backed by SQLite.
Only capable of mapping strings to strings; everything will be cast to a
buffer on its way into the database and back to a str on the way out.
"""
def __init__(self, path, table='__main__'):
self._path = path
self._table = self._escape(table)
self._local = threading.local()
with self._conn:
cur = self._conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS %s (
key STRING UNIQUE ON CONFLICT REPLACE,
value BLOB
)''' % self._table)
index_name = self._escape(table + '_index')
cur.execute('''CREATE INDEX IF NOT EXISTS %s on %s (key)''' % (index_name, self._table))
def _escape(self, v):
"""Escapes a SQLite identifier."""
# HACK: there must be a better way to do this (but this does appear to
        # work just fine as long as there are no null bytes).
return '"%s"' % v.replace('"', '""')
@property
def _conn(self):
conn = getattr(self._local, 'conn', None)
if not hasattr(self._local, 'conn'):
self._local.conn = sqlite3.connect(self._path)
self._local.conn.text_factory = str
return self._local.conn
    # Override these in child classes to change the serializing behaviour. By
# dumping everything to a buffer SQLite will store the data as a BLOB,
# therefore preserving binary data. If it was stored as a STRING then it
# would truncate at the first null byte.
_dump_key = buffer
_load_key = str
_dump_value = buffer
_load_value = str
def setmany(self, items):
with self._conn:
self._conn.executemany('''INSERT INTO %s VALUES (?, ?)''' % self._table, (
(self._dump_key(key), self._dump_value(value)) for key, value in items
))
def __setitem__(self, key, value):
self.setmany([(key, value)])
def __getitem__(self, key):
cur = self._conn.cursor()
cur.execute('''SELECT value FROM %s WHERE key = ?''' % self._table, (self._dump_key(key), ))
res = cur.fetchone()
if not res:
raise KeyError(key)
return self._load_value(res[0])
def __contains__(self, key):
cur = self._conn.cursor()
cur.execute('''SELECT COUNT(*) FROM %s WHERE key = ?''' % self._table, (self._dump_key(key), ))
res = cur.fetchone()
return bool(res[0])
def __delitem__(self, key):
cur = self._conn.cursor()
with self._conn:
cur.execute('''DELETE FROM %s WHERE key = ?''' % self._table, (self._dump_key(key), ))
if not cur.rowcount:
raise KeyError(key)
def clear(self):
with self._conn:
self._conn.execute('''DELETE FROM %s''' % self._table)
def __len__(self):
with self._conn:
cur = self._conn.cursor()
cur.execute('''SELECT count(*) FROM %s''' % self._table)
return cur.fetchone()[0]
def iteritems(self):
cur = self._conn.cursor()
cur.execute('''SELECT key, value FROM %s''' % self._table)
for row in cur:
yield self._load_key(row[0]), self._load_value(row[1])
def __iter__(self):
cur = self._conn.cursor()
cur.execute('''SELECT key FROM %s''' % self._table)
for row in cur:
yield self._load_key(row[0])
iterkeys = __iter__
def itervalues(self):
cur = self._conn.cursor()
cur.execute('''SELECT value FROM %s''' % self._table)
for row in cur:
yield self._load_value(row[0])
items = lambda self: list(self.iteritems())
keys = lambda self: list(self.iterkeys())
values = lambda self: list(self.itervalues())
def update(self, *args, **kwargs):
self.setmany(self._update_iter(args, kwargs))
def _update_iter(self, args, kwargs):
"""A generator to turn the args/kwargs of the update method into items.
This is written in the spirit of the documentation for the dict.update
method.
"""
for arg in args:
if hasattr(arg, 'keys'):
for key in arg.keys():
yield (key, arg[key])
else:
for item in arg:
yield item
for item in kwargs.iteritems():
            yield item
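# Usage sketch. LiteMap behaves like a dict persisted to an SQLite file; note that the module
# targets Python 2 (buffer, iteritems). The path and values below are illustrative.
#
#   store = LiteMap('/tmp/example.db')         # default table '__main__'
#   store['greeting'] = 'hello'                # keys and values are stored as strings/BLOBs
#   store.setmany([('a', '1'), ('b', '2')])    # bulk insert in a single transaction
#   print store['greeting']                    # -> 'hello'
#   'a' in store, len(store), list(store)      # standard mapping operations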
/DimStore-0.1.1-py3-none-any.whl/dimsum/providers/dataframe/converter/pandas_converter.py

from pandas.core.frame import DataFrame
from pyspark import SparkContext, SQLContext
from dimsum.providers.dataframe.converter.converter_base import ConverterBase
class PandasConverter(ConverterBase):
# class attributes
__support__ = {('pandas','pandas'),
('pandas','pyspark')}
"""
"
" report conversion capability
"
"""
@classmethod
def is_capable(cls, in_type, out_type):
"""
@param::in_type: the type of input dataframe in string
@param::out_type: the type of output dataframe in string
        return a boolean value indicating whether the conversion is supported
"""
return (in_type.lower(),out_type.lower()) in cls.__support__
"""
"
" perform the dataframe conversion
"
"""
@classmethod
def astype(cls,df,out_type,**kwargs):
"""
@param::out_type: the type of output dataframe in string
return the converted dataframe or None if not feasible
"""
# handle edge cases
if not isinstance(df,DataFrame):
            raise Exception('> PandasConverter astype(): input dataframe must be an instance of the pandas DataFrame class.')
if out_type == None:
raise ValueError('> PandasConverter astype(): dataframe out_type parameter can not be none.')
if not cls.is_capable('pandas',out_type):
raise Exception('> PandasConverter astype(): convert to type: %s not supported.'%(out_type))
# get pyspark context
sc = SparkContext.getOrCreate()
sqlcontext=SQLContext(sc)
# convert to target type
if out_type.lower() == 'pyspark': # explicitly intended
try:
return sqlcontext.createDataFrame(df)
except Exception as e:
print('> PandasConverter astype(): convert to pyspark dataframe failed: %s'%(e))
if out_type.lower() == 'pandas': # explicitly intended
return df
return None
"""
"
" report the capability of convert operation
"
"""
@classmethod
def info(cls):
"""
@param: empty intended
        return list of possible conversion combinations
"""
return ["[%s] => [%s]"%(i,o) for i,o in cls.__support__] | PypiClean |
/MuPhyN-0.1.1.post4-py3-none-any.whl/muphyn/packages/interface/base/math_expression_painter.py

import matplotlib as mpl
from PyQt6.QtGui import QFont, QImage
from matplotlib.figure import Figure
from matplotlib.backends.backend_qtagg import FigureCanvasQTAgg
def generateMathExpression(mathText):
"""This function is not used for the moment
    but can be used as a replacement for mathTex_to_QPixmap"""
figure = Figure(edgecolor=(0, 0, 0), facecolor=(0, 0, 0))
canvas = FigureCanvasQTAgg(figure)
# l.addWidget(self._canvas)
figure.clear()
text = figure.suptitle(
mathText,
x = 0.0,
y = 1.0,
horizontalalignment = 'left',
verticalalignment = 'top',
size = QFont().pointSize()*2
)
#
canvas.draw()
(x0,y0),(x1,y1) = text.get_window_extent().get_points()
w=x1-x0; h=y1-y0
figure.set_size_inches(w/80, h/80)
def mathTex_to_QImage(mathTex, fs = 40):
# Enable usetex param for Matplotlib
textUseTexParam = mpl.rcParams['text.usetex']
mpl.rcParams['text.usetex'] = True
#---- set up a mpl figure instance ----
fig = Figure(edgecolor=(0, 0, 0), facecolor="None")
canvas = FigureCanvasQTAgg(fig)
fig.set_canvas(canvas)
renderer = canvas.get_renderer()
#---- plot the mathTex expression ----
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
ax.patch.set_facecolor('none')
t = ax.text(0, 0, mathTex, ha='left', va='bottom', fontsize=fs)
#---- fit figure size to text artist ----
fwidth, fheight = fig.get_size_inches()
fig_bbox = fig.get_window_extent(renderer)
text_bbox = t.get_window_extent(renderer)
tight_fwidth = text_bbox.width * fwidth / fig_bbox.width
tight_fheight = text_bbox.height * fheight / fig_bbox.height
fig.set_size_inches(tight_fwidth, tight_fheight)
#---- convert mpl figure to QPixmap ----
buf, size = fig.canvas.print_to_buffer()
qimage = QImage.rgbSwapped(QImage(buf, size[0], size[1],
QImage.Format.Format_ARGB32))
# Restore matplotlib useText param
mpl.rcParams['text.usetex'] = textUseTexParam
    return qimage
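# Usage sketch. mathTex_to_QImage renders a LaTeX string to a QImage via Matplotlib; it enables
# text.usetex, so a working LaTeX installation is required, and a QApplication should normally
# exist before the Qt canvas is created. The expression below is illustrative.
#
#   from PyQt6.QtWidgets import QApplication
#   app = QApplication([])
#   image = mathTex_to_QImage(r'$\frac{a}{b} + \sqrt{c}$', fs=40)
#   image.save('formula.png')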
/FoLiA-Linguistic-Annotation-Tool-0.11.tar.gz/FoLiA-Linguistic-Annotation-Tool-0.11/flat/script/foliaspec.js

foliaspec = {
"annotationtype": [
"TEXT",
"TOKEN",
"DIVISION",
"PARAGRAPH",
"HEAD",
"LIST",
"FIGURE",
"WHITESPACE",
"LINEBREAK",
"SENTENCE",
"POS",
"LEMMA",
"DOMAIN",
"SENSE",
"SYNTAX",
"CHUNKING",
"ENTITY",
"CORRECTION",
"ERRORDETECTION",
"PHON",
"SUBJECTIVITY",
"MORPHOLOGICAL",
"EVENT",
"DEPENDENCY",
"TIMESEGMENT",
"GAP",
"QUOTE",
"NOTE",
"REFERENCE",
"RELATION",
"SPANRELATION",
"COREFERENCE",
"SEMROLE",
"METRIC",
"LANG",
"STRING",
"TABLE",
"STYLE",
"PART",
"UTTERANCE",
"ENTRY",
"TERM",
"DEFINITION",
"EXAMPLE",
"PHONOLOGICAL",
"PREDICATE",
"OBSERVATION",
"SENTIMENT",
"STATEMENT",
"ALTERNATIVE",
"RAWCONTENT",
"COMMENT",
"DESCRIPTION",
"HYPHENATION",
"HIDDENTOKEN",
"MODALITY",
"EXTERNAL",
"HSPACE"
],
"annotationtype_doc": {
"alternative": {
"description": "This form of higher-order annotation encapsulates alternative annotations, i.e. annotations that are posed as an alternative option rather than the authoratitive chosen annotation",
"history": "Since the beginning, may carry set and classes since v2.0",
"name": "Alternative Annotation"
},
"chunking": {
"description": "Assigns shallow grammatical categories to spans of words. Unlike syntax annotation, chunks are not nestable. They are often produced by a process called Shallow Parsing, or alternatively, chunking.",
"history": "Since the beginning",
"name": "Chunking"
},
"comment": {
"description": "This is a form of higher-order annotation that allows you to associate comments with almost all other annotation elements",
"history": "Since v1.3",
"name": "Comment Annotation"
},
"coreference": {
"description": "Relations between words that refer to the same referent (anaphora) are expressed in FoLiA using Coreference Annotation. The co-reference relations are expressed by specifying the entire chain in which all links are coreferent.",
"history": "since v0.9",
"name": "Coreference Annotation"
},
"correction": {
"description": "Corrections are one of the most complex annotation types in FoLiA. Corrections can be applied not just over text, but over any type of structure annotation, inline annotation or span annotation. Corrections explicitly preserve the original, and recursively so if corrections are done over other corrections.",
"history": "Since v0.4",
"name": "Correction Annotation"
},
"definition": {
"description": "FoLiA has a set of structure elements that can be used to represent collections such as glossaries, dictionaries, thesauri, and wordnets. `Entry annotation` defines the entries in such collections, `Term annotation` defines the terms, and `Definition Annotation` provides the definitions.",
"history": "since v0.12",
"name": "Definition Annotation"
},
"dependency": {
"description": "Dependency relations are syntactic relations between spans of tokens. A dependency relation takes a particular class and consists of a single head component and a single dependent component.",
"history": "Slightly revised since v0.8 (no ``su`` attribute on ``hd``/``dep``)",
"name": "Dependency Annotation"
},
"description": {
"description": "This is a form of higher-order annotation that allows you to associate descriptions with almost all other annotation elements",
"history": "Since the beginning",
"name": "Description Annotation"
},
"division": {
"description": "Structure annotation representing some kind of division, typically used for chapters, sections, subsections (up to the set definition). Divisions may be nested at will, and may include almost all kinds of other structure elements.",
"history": "Since the beginning",
"name": "Division Annotation"
},
"domain": {
"description": "Domain/topic Annotation. A form of inline annotation used to assign a certain domain or topic to a structure element.",
"history": "Since the beginning",
"name": "Domain/topic Annotation"
},
"entity": {
"description": "Entity annotation is a broad and common category in FoLiA. It is used for specifying all kinds of multi-word expressions, including but not limited to named entities. The set definition used determines the vocabulary and therefore the precise nature of the entity annotation.",
"history": "Since the beginning",
"name": "Entity Annotation"
},
"entry": {
"description": "FoLiA has a set of structure elements that can be used to represent collections such as glossaries, dictionaries, thesauri, and wordnets. `Entry annotation` defines the entries in such collections, `Term annotation` defines the terms, and `Definition Annotation` provides the definitions.",
"history": "since v0.12",
"name": "Entry Annotation"
},
"errordetection": {
"description": "This annotation type is deprecated in favour of `Observation Annotation` and only exists for backward compatibility.",
"history": "Deprecated since v2.0.0",
"name": "Error Detection"
},
"event": {
"description": "Structural annotation type representing events, often used in new media contexts for things such as tweets, chat messages and forum posts (as defined by a user-defined set definition). Note that a more linguistic kind of event annotation can be accomplished with `Entity Annotation` or even `Time Segmentation` rather than this one.",
"history": "since v0.7",
"name": "Event Annotation"
},
"example": {
"description": "FoLiA has a set of structure elements that can be used to represent collections such as glossaries, dictionaries, thesauri, and wordnets. `Examples annotation` defines examples in such collections.",
"history": "since v0.12",
"name": "Example Annotation"
},
"external": {
"description": "External annotation makes a reference to an external FoLiA document whose structure is inserted at the exact place the external element occurs.",
"history": "Since v2.4.0",
"name": "External Annotation"
},
"figure": {
"description": "Structure annotation for including pictures, optionally captioned, in documents.",
"history": "Since the beginning",
"name": "Figure Annotation"
},
"gap": {
"description": "Sometimes there are parts of a document you want to skip and not annotate at all, but include as is. This is where gap annotation comes in, the user-defined set may indicate the kind of gap. Common omissions in books are for example front-matter and back-matter, i.e. the cover.",
"history": "Since the beginning",
"name": "Gap Annotation"
},
"head": {
"description": "The ``head`` element is used to provide a header or title for the structure element in which it is embedded, usually a division (``<div>``)",
"history": "Since the beginning",
"name": "Head Annotation"
},
"hiddentoken": {
"description": "This annotation type allows for a hidden token layer in the document. Hidden tokens are ignored for most intents and purposes but may serve a purpose when annotations on implicit tokens is required, for example as targets for syntactic movement annotation.",
"history": "Since v2.0",
"name": "Hidden Token Annotation"
},
"hspace": {
"description": "Markup annotation introducing horizontal whitespace",
"history": "Since the v2.5.0",
"name": "Horizontal Whitespace"
},
"hyphenation": {
"description": "This is a text-markup annotation form that indicates where in the original text a linebreak was inserted and a word was hyphenised.",
"history": "Since v2.0",
"name": "Hyphenation Annotation"
},
"lang": {
"description": "Language Annotation simply identifies the language a part of the text is in. Though this information is often part of the metadata, this form is considered an actual annotation.",
"history": "since v0.8.1",
"name": "Language Annotation"
},
"lemma": {
"description": "Lemma Annotation, one of the most common types of linguistic annotation. Represents the canonical form of a word.",
"history": "Since the beginning",
"name": "Lemmatisation"
},
"linebreak": {
"description": "Structure annotation representing a single linebreak and with special facilities to denote pagebreaks.",
"history": "Since the beginning",
"name": "Linebreak"
},
"list": {
"description": "Structure annotation for enumeration/itemisation, e.g. bulleted lists.",
"history": "Since the beginning",
"name": "List Annotation"
},
"metric": {
"description": "Metric Annotation is a form of higher-order annotation that allows annotation of some kind of measurement. The type of measurement is defined by the class, which in turn is defined by the set as always. The metric element has a ``value`` attribute that stores the actual measurement, the value is often numeric but this needs not be the case.",
"history": "since v0.9",
"name": "Metric Annotation"
},
"modality": {
"description": "Modality annotation is used to describe the relationship between cue word(s) and the scope it covers. It is primarily used for the annotation of negation, but also for the annotation of factuality, certainty and truthfulness:.",
"history": "Since v2.4.0",
"name": "Modality Annotation"
},
"morphological": {
"description": "Morphological Annotation allows splitting a word/token into morphemes, morphemes itself may be nested. It is embedded within a layer ``morphology`` which can be embedded within word/tokens.",
"history": "Heavily revised since v0.9",
"name": "Morphological Annotation"
},
"note": {
"description": "Structural annotation used for notes, such as footnotes or warnings or notice blocks.",
"history": "Since v0.11",
"name": "Note Annotation"
},
"observation": {
"description": "Observation annotation is used to make an observation pertaining to one or more word tokens. Observations offer a an external qualification on part of a text. The qualification is expressed by the class, in turn defined by a set. The precise semantics of the observation depends on the user-defined set.",
"history": "since v1.3",
"name": "Observation Annotation"
},
"paragraph": {
"description": "Represents a paragraph and holds further structure annotation such as sentences.",
"history": "Since the beginning",
"name": "Paragraph Annotation"
},
"part": {
"description": "The structure element ``part`` is a fairly abstract structure element that should only be used when a more specific structure element is not available. Most notably, the part element should never be used for representation of morphemes or phonemes! Part can be used to divide a larger structure element, such as a division, or a paragraph into arbitrary subparts.",
"history": "since v0.11.2",
"name": "Part Annotation"
},
"phon": {
"description": "This is the phonetic analogy to text content (``<t>``) and allows associating a phonetic transcription with any structural element, it is often used in a speech context. Note that for actual segmentation into phonemes, FoLiA has another related type: ``Phonological Annotation``",
"history": "Since v0.12",
"name": "Phonetic Annotation"
},
"phonological": {
"description": "The smallest unit of annotatable speech in FoLiA is the phoneme level. The phoneme element is a form of structure annotation used for phonemes. Alike to morphology, it is embedded within a layer ``phonology`` which can be embedded within word/tokens.",
"history": "since v0.12",
"name": "Phonological Annotation"
},
"pos": {
"description": "Part-of-Speech Annotation, one of the most common types of linguistic annotation. Assigns a lexical class to words.",
"history": "Since the beginning",
"name": "Part-of-Speech Annotation"
},
"predicate": {
"description": "Allows annotation of predicates, this annotation type is usually used together with Semantic Role Annotation. The types of predicates are defined by a user-defined set definition.",
"history": "since v1.3",
"name": "Predicate Annotation"
},
"quote": {
"description": "Structural annotation used to explicitly mark quoted speech, i.e. that what is reported to be said and appears in the text in some form of quotation marks.",
"history": "Since v0.11",
"name": "Quote Annotation"
},
"rawcontent": {
"description": "This associates raw text content which can not carry any further annotation. It is used in the context of :ref:`gap_annotation`",
"history": "Since the beginning, but revised and made a proper annotation type in v2.0",
"name": "Raw Content"
},
"reference": {
"description": "Structural annotation for referring to other annotation types. Used e.g. for referring to bibliography entries (citations) and footnotes.",
"history": "Since v0.11, external references since v1.2",
"name": "Reference Annotation"
},
"relation": {
"description": "FoLiA provides a facility to relate arbitrary parts of your document with other parts of your document, or even with parts of other FoLiA documents or external resources, even in other formats. It thus allows linking resources together. Within this context, the ``xref`` element is used to refer to the linked FoLiA elements.",
"history": "Revised since v0.8, renamed from alignment in v2.0",
"name": "Relation Annotation"
},
"semrole": {
"description": "This span annotation type allows for the expression of semantic roles, or thematic roles. It is often used together with `Predicate Annotation`",
"history": "since v0.9, revised since v1.3 (added predicates)",
"name": "Semantic Role Annotation"
},
"sense": {
"description": "Sense Annotation allows to assign a lexical semantic sense to a word.",
"history": "Since the beginning",
"name": "Sense Annotation"
},
"sentence": {
"description": "Structure annotation representing a sentence. Sentence detection is a common stage in NLP alongside tokenisation.",
"history": "Since the beginning",
"name": "Sentence Annotation"
},
"sentiment": {
"description": "Sentiment analysis marks subjective information such as sentiments or attitudes expressed in text. The sentiments/attitudes are defined by a user-defined set definition.",
"history": "since v1.3",
"name": "Sentiment Annotation"
},
"spanrelation": {
"description": "Span relations are a stand-off extension of relation annotation that allows for more complex relations, such as word alignments that include many-to-one, one-to-many or many-to-many alignments. One of its uses is in the alignment of multiple translations of (parts) of a text.",
"history": "since v0.8, renamed from complexalignment in v2.0",
"name": "Span Relation Annotation"
},
"statement": {
"description": "Statement annotation, sometimes also refered to as attribution, allows to decompose statements into the source of the statement, the content of the statement, and the way these relate, provided these are made explicit in the text.",
"history": "since v1.3",
"name": "Statement Annotation"
},
"string": {
"description": "This is a form of higher-order annotation for selecting an arbitrary substring of a text, even untokenised, and allows further forms of higher-order annotation on the substring. It is also tied to a form of text markup annotation.",
"history": "since v0.9.1",
"name": "String Annotation"
},
"style": {
"description": "This is a text markup annotation type for applying styling to text. The actual styling is defined by the user-defined set definition and can for example included classes such as italics, bold, underline",
"history": "since v0.10",
"name": "Style Annotation"
},
"subjectivity": {
"description": "This annotation type is deprecated in favour of `Sentiment Annotation` and only exists for backward compatibility.",
"history": "Deprecated since v1.5",
"name": "Subjectivity Annotation"
},
"syntax": {
"description": "Assign grammatical categories to spans of words. Syntactic units are nestable and allow representation of complete syntax trees that are usually the result of consistuency parsing.",
"history": "Since the beginning",
"name": "Syntactic Annotation"
},
"table": {
"description": "Structural annotation type for creating a simple tabular environment, i.e. a table with rows, columns and cells and an optional header.",
"history": "since v0.9.2",
"name": "Table Annotation"
},
"term": {
"description": "FoLiA has a set of structure elements that can be used to represent collections such as glossaries, dictionaries, thesauri, and wordnets. `Entry annotation` defines the entries in such collections, `Term annotation` defines the terms, and `Definition Annotation` provides the definitions.",
"history": "since v0.12",
"name": "Term Annotation"
},
"text": {
"description": "Text annotation associates actual textual content with structural elements, without it a document would be textless. FoLiA treats it as an annotation like any other.",
"history": "Since the beginning, revised since v0.6",
"name": "Text Annotation"
},
"timesegment": {
"description": "FoLiA supports time segmentation to allow for more fine-grained control of timing information by associating spans of words/tokens with exact timestamps. It can provide a more linguistic alternative to `Event Annotation`.",
"history": "Since v0.8 but renamed since v0.9",
"name": "Time Segmentation"
},
"token": {
"description": "This annotation type introduces a tokenisation layer for the document. The terms **token** and **word** are used interchangeably in FoLiA as FoLiA itself does not commit to a specific tokenisation paradigm. Tokenisation is a prerequisite for the majority of linguistic annotation types offered by FoLiA and it is one of the most fundamental types of Structure Annotation. The words/tokens are typically embedded in other types of structure elements, such as sentences or paragraphs.",
"history": "Since the beginning",
"name": "Token Annotation"
},
"utterance": {
"description": "An utterance is a structure element that may consist of words or sentences, which in turn may contain words. The opposite is also true, a sentence may consist of multiple utterances. Utterances are often used in the absence of sentences in a speech context, where neat grammatical sentences can not always be distinguished.",
"history": "since v0.12",
"name": "Utterance Annotation"
},
"whitespace": {
"description": "Structure annotation introducing vertical whitespace",
"history": "Since the beginning",
"name": "Vertical Whitespace"
}
},
"attributes": [
"ID",
"CLASS",
"ANNOTATOR",
"CONFIDENCE",
"N",
"DATETIME",
"BEGINTIME",
"ENDTIME",
"SRC",
"SPEAKER",
"TEXTCLASS",
"METADATA",
"IDREF",
"SPACE",
"TAG"
],
"attributes_doc": {
"annotator": {
"description": "This is an older alternative to the ``processor`` attribute, without support for full provenance. The annotator attribute simply refers to the name o ID of the system or human annotator that made the annotation.",
"group": "authorship",
"name": "annotator"
},
"annotatortype": {
"description": "This is an older alternative to the ``processor`` attribute, without support for full provenance. It is used together with ``annotator`` and specific the type of the annotator, either ``manual`` for human annotators or ``auto`` for automated systems.",
"group": "authorship",
"name": "annotatortype"
},
"begintime": {
"description": "A timestamp in ``HH:MM:SS.MMM`` format, indicating the begin time of the speech. If a sound clip is specified (``src``); the timestamp refers to a location in the soundclip.",
"group": "speech",
"name": "begintime"
},
"class": {
"description": "The class of the annotation, i.e. the annotation tag in the vocabulary defined by ``set``.",
"group": "core",
"name": "class"
},
"confidence": {
"description": "A floating point value between zero and one; expresses the confidence the annotator places in his annotation.",
"group": "annotation",
"name": "confidence"
},
"datetime": {
"description": "The date and time when this annotation was recorded, the format is ``YYYY-MM-DDThh:mm:ss`` (note the literal T in the middle to separate date from time), as per the XSD Datetime data type.",
"group": "annotation",
"name": "datetime"
},
"endtime": {
"description": "A timestamp in ``HH:MM:SS.MMM`` format, indicating the end time of the speech. If a sound clip is specified (``src``); the timestamp refers to a location in the soundclip.",
"group": "speech",
"name": "endtime"
},
"id": {
"description": "The ID of the element; this has to be a unique in the entire document or collection of documents (corpus). All identifiers in FoLiA are of the `XML NCName <https://www.w3.org/TR/1999/WD-xmlschema-2-19990924/#NCName>`_ datatype, which roughly means it is a unique string that has to start with a letter (not a number or symbol), may contain numbers, but may never contain colons or spaces. FoLiA does not define any naming convention for IDs.",
"group": "core",
"name": "xml:id"
},
"idref": {
"description": "A reference to the ID of another element. This is a reference and not an assignment, unlike xml:id, so do not confuse the two! It is only supported on certain elements that are referential in nature.",
"group": "core",
"name": "id"
},
"n": {
"description": "A number in a sequence, corresponding to a number in the original document, for example chapter numbers, section numbers, list item numbers. This this not have to be an actual number but other sequence identifiers are also possible (think alphanumeric characters or roman numerals).",
"group": "annotation",
"name": "n"
},
"processor": {
"description": "This refers to the ID of a processor in the :ref:`provenance_data`. The processor in turn defines exactly who or what was the annotator of the annotation.",
"group": "provenance",
"name": "processor"
},
"set": {
"description": "The set of the element, ideally a URI linking to a set definition (see :ref:`set_definitions`) or otherwise a uniquely identifying string. The ``set`` must be referred to also in the :ref:`annotation_declarations` for this annotation type.",
"group": "core",
"name": "set"
},
"space": {
"description": "This attribute indicates whether spacing should be inserted after this element (it's default value is always ``yes``, so it does not need to be specified in that case), but if tokens or other structural elements are glued together then the value should be set to ``no``. This allows for reconstruction of the detokenised original text. ",
"group": "annotation",
"name": "space"
},
"speaker": {
"description": "A string identifying the speaker. This attribute is inheritable. Multiple speakers are not allowed, simply do not specify a speaker on a certain level if you are unable to link the speech to a specific (single) speaker.",
"group": "speech",
"name": "speaker"
},
"src": {
"description": "Points to a file or full URL of a sound or video file. This attribute is inheritable.",
"group": "speech",
"name": "src"
},
"tag": {
"description": "Contains a space separated list of processing tags associated with the element. A processing tag carries arbitrary user-defined information that may aid in processing a document. It may carry cues on how a specific tool should treat a specific element. The tag vocabulary is specific to the tool that processes the document. Tags carry no instrinsic meaning for the data representation and should not be used except to inform/aid processors in their task. Processors are encouraged to clean up the tags they use. Ideally, published FoLiA documents at the end of a processing pipeline carry no further tags. For encoding actual data, use ``class`` and optionally features instead.",
"name": "tag"
},
"textclass": {
"description": "Refers to the text class this annotation is based on. This is an advanced attribute, if not specified, it defaults to ``current``. See :ref:`textclass_attribute`.",
"group": "annotation",
"name": "textclass"
}
},
"categories": {
"content": {
"class": "AbstractContentAnnotation",
"description": "This category groups text content and phonetic content, the former being one of the most frequent elements in FoLiA and used to associate text (or a phonetic transcription) with a structural element.",
"name": "Content Annotation"
},
"higherorder": {
"class": "AbstractHigherOrderAnnotation",
"description": "Higher-order Annotation groups a very diverse set of annotation types that are considered *annotations on annotations*",
"name": "Higher-order Annotation"
},
"inline": {
"class": "AbstractInlineAnnotation",
"description": "This category encompasses (linguistic) annotation types describing a single structural element. Examples are Part-of-Speech Annotation or Lemmatisation, which often describe a single token.",
"name": "Inline Annotation"
},
"span": {
"class": "AbstractSpanAnnotation",
"description": "This category encompasses (linguistic) annotation types that span one or more structural elements. Examples are (Named) Entities or Multi-word Expressions, Dependency Relations, and many others. FoLiA implements these as a stand-off layer that refers back to the structural elements (often words/tokens). The layer itself is embedded in a structural level of a wider scope (such as a sentence).",
"name": "Span Annotation"
},
"structure": {
"class": "AbstractStructureElement",
"description": "This category encompasses annotation types that define the structure of a document, e.g. paragraphs, sentences, words, sections like chapters, lists, tables, etc... These types are not strictly considered linguistic annotation and equivalents are also commonly found in other document formats such as HTML, TEI, MarkDown, LaTeX, and others. For FoLiA it provides the necessary structural basis that linguistic annotation can build on.",
"name": "Structure Annotation"
},
"subtoken": {
"class": "AbstractSubtokenAnnotation",
"description": "This category contains morphological annotation and phonological annotation, i.e. the segmentation of a word into morphemes and phonemes, and recursively so if desired. It is a special category that mixes characteristics from structure annotation (the ``morpheme`` and ``phoneme`` elements are very structure-like) and also from span annotation, as morphemes and phonemes are embedded in an annotation layer and refer back to the text/phonetic content they apply to. Like words/tokens, these elements may also be referenced from ``wref`` elements.",
"name": "Subtoken Annotation"
},
"textmarkup": {
"class": "AbstractTextMarkup",
"description": "The text content element (``<t>``) allows within its scope elements of a this category; these are **Text Markup** elements, they always contain textual content and apply a certain markup to certain spans of the text. One of it's common uses is for styling (emphasis, underlines, etc.). Text markup elements may be nested.",
"name": "Text Markup Annotation"
}
},
"default_ignore": [
"Original",
"Suggestion",
"Alternative",
"AlternativeLayers",
"ForeignData"
],
"default_ignore_annotations": [
"Original",
"Suggestion",
"Alternative",
"AlternativeLayers",
"MorphologyLayer",
"PhonologyLayer"
],
"default_ignore_structure": [
"Original",
"Suggestion",
"Alternative",
"AlternativeLayers",
"AbstractAnnotationLayer"
],
"defaultproperties": {
"accepted_data": [
"Description",
"Comment"
],
"annotationtype": null,
"auth": true,
"auto_generate_id": false,
"hidden": false,
"occurrences": 0,
"occurrences_per_set": 0,
"optional_attribs": null,
"phoncontainer": false,
"primaryelement": true,
"printable": false,
"required_attribs": null,
"required_data": null,
"setonly": false,
"speakable": false,
"subset": null,
"textcontainer": false,
"textdelimiter": null,
"wrefable": false,
"xlink": false,
"xmltag": null
},
"elements": [
{
"class": "AbstractAnnotationLayer",
"elements": [
{
"class": "ChunkingLayer",
"properties": {
"accepted_data": [
"Chunk"
],
"annotationtype": "CHUNKING",
"primaryelement": false,
"xmltag": "chunking"
}
},
{
"class": "SpanRelationLayer",
"properties": {
"accepted_data": [
"SpanRelation"
],
"annotationtype": "SPANRELATION",
"primaryelement": false,
"xmltag": "spanrelations"
}
},
{
"class": "CoreferenceLayer",
"properties": {
"accepted_data": [
"CoreferenceChain"
],
"annotationtype": "COREFERENCE",
"primaryelement": false,
"xmltag": "coreferences"
}
},
{
"class": "DependenciesLayer",
"properties": {
"accepted_data": [
"Dependency"
],
"annotationtype": "DEPENDENCY",
"primaryelement": false,
"xmltag": "dependencies"
}
},
{
"class": "EntitiesLayer",
"properties": {
"accepted_data": [
"Entity"
],
"annotationtype": "ENTITY",
"primaryelement": false,
"xmltag": "entities"
}
},
{
"class": "MorphologyLayer",
"properties": {
"accepted_data": [
"Morpheme"
],
"annotationtype": "MORPHOLOGICAL",
"primaryelement": false,
"xmltag": "morphology"
}
},
{
"class": "ObservationLayer",
"properties": {
"accepted_data": [
"Observation"
],
"annotationtype": "OBSERVATION",
"primaryelement": false,
"xmltag": "observations"
}
},
{
"class": "PhonologyLayer",
"properties": {
"accepted_data": [
"Phoneme"
],
"annotationtype": "PHONOLOGICAL",
"primaryelement": false,
"xmltag": "phonology"
}
},
{
"class": "SemanticRolesLayer",
"properties": {
"accepted_data": [
"SemanticRole",
"Predicate"
],
"annotationtype": "SEMROLE",
"primaryelement": false,
"xmltag": "semroles"
}
},
{
"class": "SentimentLayer",
"properties": {
"accepted_data": [
"Sentiment"
],
"annotationtype": "SENTIMENT",
"primaryelement": false,
"xmltag": "sentiments"
}
},
{
"class": "StatementLayer",
"properties": {
"accepted_data": [
"Statement"
],
"annotationtype": "STATEMENT",
"primaryelement": false,
"xmltag": "statements"
}
},
{
"class": "SyntaxLayer",
"properties": {
"accepted_data": [
"SyntacticUnit"
],
"annotationtype": "SYNTAX",
"primaryelement": false,
"xmltag": "syntax"
}
},
{
"class": "TimingLayer",
"properties": {
"accepted_data": [
"TimeSegment"
],
"annotationtype": "TIMESEGMENT",
"primaryelement": false,
"xmltag": "timing"
}
},
{
"class": "ModalitiesLayer",
"properties": {
"accepted_data": [
"Modality"
],
"annotationtype": "MODALITY",
"primaryelement": false,
"xmltag": "modalities"
}
}
],
"properties": {
"accepted_data": [
"Correction",
"ForeignData"
],
"optional_attribs": [
"ID",
"TAG"
],
"printable": false,
"setonly": true,
"speakable": false
}
},
{
"class": "AbstractCorrectionChild",
"elements": [
{
"class": "Current",
"properties": {
"annotationtype": "CORRECTION",
"occurrences": 1,
"optional_attribs": [
"TAG"
],
"primaryelement": false,
"xmltag": "current"
}
},
{
"class": "New",
"properties": {
"annotationtype": "CORRECTION",
"occurrences": 1,
"optional_attribs": [
"TAG"
],
"primaryelement": false,
"xmltag": "new"
}
},
{
"class": "Original",
"properties": {
"annotationtype": "CORRECTION",
"auth": false,
"occurrences": 1,
"optional_attribs": [
"TAG"
],
"primaryelement": false,
"xmltag": "original"
}
},
{
"class": "Suggestion",
"properties": {
"annotationtype": "CORRECTION",
"auth": false,
"occurrences": 0,
"optional_attribs": [
"CONFIDENCE",
"N",
"TAG"
],
"primaryelement": false,
"xmltag": "suggestion"
}
}
],
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"AbstractSpanAnnotation",
"AbstractStructureElement",
"Correction",
"Metric",
"PhonContent",
"String",
"TextContent",
"ForeignData"
],
"optional_attribs": [
"ID",
"ANNOTATOR",
"CONFIDENCE",
"DATETIME",
"N",
"TAG"
],
"printable": true,
"speakable": true,
"textdelimiter": null
}
},
{
"class": "AbstractSpanAnnotation",
"elements": [
{
"class": "AbstractSpanRole",
"elements": [
{
"class": "CoreferenceLink",
"properties": {
"accepted_data": [
"Headspan",
"LevelFeature",
"ModalityFeature",
"TimeFeature"
],
"annotationtype": "COREFERENCE",
"label": "Coreference Link",
"primaryelement": false,
"xmltag": "coreferencelink"
}
},
{
"class": "DependencyDependent",
"properties": {
"label": "Dependent",
"occurrences": 1,
"primaryelement": false,
"xmltag": "dep"
}
},
{
"class": "Headspan",
"properties": {
"label": "Head",
"occurrences": 1,
"primaryelement": false,
"xmltag": "hd"
}
},
{
"class": "StatementRelation",
"properties": {
"label": "Relation",
"occurrences": 1,
"primaryelement": false,
"xmltag": "rel"
}
},
{
"class": "Source",
"properties": {
"label": "Source",
"occurrences": 1,
"primaryelement": false,
"xmltag": "source"
}
},
{
"class": "Target",
"properties": {
"label": "Target",
"occurrences": 1,
"primaryelement": false,
"xmltag": "target"
}
},
{
"class": "Cue",
"properties": {
"label": "Cue",
"occurrences": 1,
"primaryelement": false,
"xmltag": "cue"
}
},
{
"class": "Scope",
"properties": {
"accepted_data": [
"Cue",
"Source",
"Target"
],
"label": "Scope",
"occurrences": 1,
"primaryelement": false,
"xmltag": "scope"
}
}
],
"properties": {
"accepted_data": [
"Feature",
"WordReference",
"LinkReference"
],
"optional_attribs": [
"ID",
"TAG"
],
"primaryelement": false
}
},
{
"class": "Chunk",
"properties": {
"accepted_data": [
"Feature",
"WordReference"
],
"annotationtype": "CHUNKING",
"label": "Chunk",
"xmltag": "chunk"
}
},
{
"class": "CoreferenceChain",
"properties": {
"accepted_data": [
"Feature",
"CoreferenceLink"
],
"annotationtype": "COREFERENCE",
"label": "Coreference Chain",
"required_data": [
"CoreferenceLink"
],
"xmltag": "coreferencechain"
}
},
{
"class": "Modality",
"properties": {
"accepted_data": [
"Scope",
"Feature",
"Cue",
"Source",
"Target",
"PolarityFeature",
"StrengthFeature"
],
"annotationtype": "MODALITY",
"label": "Modality",
"xmltag": "modality"
}
},
{
"class": "Dependency",
"properties": {
"accepted_data": [
"DependencyDependent",
"Feature",
"Headspan"
],
"annotationtype": "DEPENDENCY",
"label": "Dependency",
"required_data": [
"DependencyDependent",
"Headspan"
],
"xmltag": "dependency"
}
},
{
"class": "Entity",
"properties": {
"accepted_data": [
"Feature",
"WordReference"
],
"annotationtype": "ENTITY",
"label": "Entity",
"xmltag": "entity"
}
},
{
"class": "Observation",
"properties": {
"accepted_data": [
"Feature",
"WordReference"
],
"annotationtype": "OBSERVATION",
"label": "Observation",
"xmltag": "observation"
}
},
{
"class": "Predicate",
"properties": {
"accepted_data": [
"Feature",
"SemanticRole",
"WordReference"
],
"annotationtype": "PREDICATE",
"label": "Predicate",
"xmltag": "predicate"
}
},
{
"class": "SemanticRole",
"properties": {
"accepted_data": [
"Feature",
"Headspan",
"WordReference"
],
"annotationtype": "SEMROLE",
"label": "Semantic Role",
"required_attribs": [
"CLASS"
],
"xmltag": "semrole"
}
},
{
"class": "Sentiment",
"properties": {
"accepted_data": [
"Feature",
"Headspan",
"PolarityFeature",
"StrengthFeature",
"Source",
"Target",
"WordReference"
],
"annotationtype": "SENTIMENT",
"label": "Sentiment",
"xmltag": "sentiment"
}
},
{
"class": "Statement",
"properties": {
"accepted_data": [
"Feature",
"Headspan",
"StatementRelation",
"Source",
"WordReference"
],
"annotationtype": "STATEMENT",
"label": "Statement",
"xmltag": "statement"
}
},
{
"class": "SyntacticUnit",
"properties": {
"accepted_data": [
"Feature",
"SyntacticUnit",
"WordReference"
],
"annotationtype": "SYNTAX",
"label": "Syntactic Unit",
"xmltag": "su"
}
},
{
"class": "TimeSegment",
"properties": {
"accepted_data": [
"ActorFeature",
"BegindatetimeFeature",
"EnddatetimeFeature",
"Feature",
"WordReference"
],
"annotationtype": "TIMESEGMENT",
"label": "Time Segment",
"xmltag": "timesegment"
}
}
],
"properties": {
"accepted_data": [
"Metric",
"Relation",
"ForeignData",
"LinkReference",
"AbstractInlineAnnotation"
],
"optional_attribs": [
"ID",
"CLASS",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"TEXTCLASS",
"METADATA",
"TAG"
],
"printable": true,
"speakable": true
}
},
{
"class": "AbstractStructureElement",
"elements": [
{
"class": "Caption",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Gap",
"Linebreak",
"Paragraph",
"PhonContent",
"Quote",
"Reference",
"Sentence",
"String",
"TextContent",
"Whitespace"
],
"label": "Caption",
"occurrences": 1,
"optional_attribs": [
"ID",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"METADATA",
"SPACE",
"TAG"
],
"xmltag": "caption"
}
},
{
"class": "Cell",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Entry",
"Event",
"Example",
"Figure",
"Gap",
"Head",
"Linebreak",
"List",
"Note",
"Paragraph",
"Quote",
"Reference",
"Sentence",
"String",
"TextContent",
"Whitespace",
"Word",
"Hiddenword"
],
"label": "Cell",
"optional_attribs": [
"ID",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"METADATA",
"SPACE",
"TAG"
],
"textdelimiter": " | ",
"xmltag": "cell"
}
},
{
"class": "Definition",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Figure",
"List",
"Metric",
"Paragraph",
"PhonContent",
"Reference",
"Sentence",
"String",
"Table",
"TextContent",
"Utterance",
"Word",
"Hiddenword",
"Linebreak",
"Whitespace"
],
"annotationtype": "DEFINITION",
"label": "Definition",
"xmltag": "def"
}
},
{
"class": "Division",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Division",
"Entry",
"Event",
"Example",
"Figure",
"Gap",
"Head",
"Linebreak",
"List",
"Note",
"Paragraph",
"Part",
"PhonContent",
"Quote",
"Reference",
"Sentence",
"Table",
"TextContent",
"Utterance",
"Whitespace",
"Word"
],
"annotationtype": "DIVISION",
"label": "Division",
"textdelimiter": "\n\n\n",
"xmltag": "div"
}
},
{
"class": "Entry",
"properties": {
"accepted_data": [
"Definition",
"Example",
"Term",
"TextContent",
"String"
],
"annotationtype": "ENTRY",
"label": "Entry",
"xmltag": "entry"
}
},
{
"class": "Event",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"ActorFeature",
"BegindatetimeFeature",
"Division",
"EnddatetimeFeature",
"Entry",
"Event",
"Example",
"Figure",
"Gap",
"Head",
"Linebreak",
"List",
"Note",
"Paragraph",
"Part",
"PhonContent",
"Quote",
"Reference",
"Sentence",
"String",
"Table",
"TextContent",
"Utterance",
"Whitespace",
"Word",
"Hiddenword"
],
"annotationtype": "EVENT",
"label": "Event",
"xmltag": "event"
}
},
{
"class": "Example",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Figure",
"Linebreak",
"List",
"Paragraph",
"PhonContent",
"Reference",
"Sentence",
"String",
"Table",
"TextContent",
"Utterance",
"Word",
"Hiddenword",
"Whitespace"
],
"annotationtype": "EXAMPLE",
"label": "Example",
"xmltag": "ex"
}
},
{
"class": "Figure",
"properties": {
"accepted_data": [
"Caption",
"String",
"TextContent",
"Linebreak"
],
"annotationtype": "FIGURE",
"label": "Figure",
"speakable": false,
"textdelimiter": "\n\n",
"xmltag": "figure"
}
},
{
"class": "Head",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Event",
"Gap",
"Linebreak",
"Paragraph",
"PhonContent",
"Reference",
"Sentence",
"String",
"TextContent",
"Whitespace",
"Word",
"Hiddenword"
],
"annotationtype": "HEAD",
"label": "Head",
"textdelimiter": "\n\n",
"xmltag": "head"
}
},
{
"class": "Hiddenword",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"PhonContent",
"Reference",
"String",
"TextContent"
],
"annotationtype": "HIDDENTOKEN",
"hidden": true,
"label": "Hidden Word/Token",
"optional_attribs": [
"ID",
"CLASS",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"TEXTCLASS",
"METADATA",
"SPACE",
"TAG"
],
"textdelimiter": " ",
"wrefable": true,
"xmltag": "hiddenw"
}
},
{
"class": "Label",
"properties": {
"accepted_data": [
"Word",
"Hiddenword",
"Reference",
"TextContent",
"PhonContent",
"String",
"Relation",
"Metric",
"Alternative",
"Alternative",
"AlternativeLayers",
"AbstractAnnotationLayer",
"AbstractInlineAnnotation",
"Correction",
"Part",
"Linebreak",
"Whitespace"
],
"label": "Label",
"xmltag": "label"
}
},
{
"class": "Linebreak",
"properties": {
"annotationtype": "LINEBREAK",
"label": "Linebreak",
"textdelimiter": "",
"xlink": true,
"xmltag": "br"
}
},
{
"class": "List",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Relation",
"Caption",
"Event",
"Linebreak",
"ListItem",
"Metric",
"Note",
"PhonContent",
"Reference",
"String",
"TextContent"
],
"annotationtype": "LIST",
"label": "List",
"textdelimiter": "\n\n",
"xmltag": "list"
}
},
{
"class": "ListItem",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Event",
"Gap",
"Label",
"Linebreak",
"List",
"Note",
"Paragraph",
"Part",
"PhonContent",
"Quote",
"Reference",
"Sentence",
"String",
"TextContent",
"Whitespace",
"Word",
"Hiddenword"
],
"label": "List Item",
"optional_attribs": [
"ID",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"METADATA",
"TAG"
],
"textdelimiter": "\n",
"xmltag": "item"
}
},
{
"class": "Note",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Example",
"Figure",
"Head",
"Linebreak",
"List",
"Paragraph",
"PhonContent",
"Reference",
"Sentence",
"String",
"Table",
"TextContent",
"Utterance",
"Whitespace",
"Word",
"Hiddenword"
],
"annotationtype": "NOTE",
"label": "Note",
"xmltag": "note"
}
},
{
"class": "Paragraph",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Entry",
"Event",
"Example",
"Figure",
"Gap",
"Head",
"Linebreak",
"List",
"Note",
"PhonContent",
"Quote",
"Reference",
"Sentence",
"String",
"TextContent",
"Whitespace",
"Word",
"Hiddenword"
],
"annotationtype": "PARAGRAPH",
"label": "Paragraph",
"textdelimiter": "\n\n",
"xmltag": "p"
}
},
{
"class": "Part",
"properties": {
"accepted_data": [
"AbstractStructureElement",
"AbstractInlineAnnotation",
"TextContent",
"PhonContent"
],
"annotationtype": "PART",
"label": "Part",
"textdelimiter": " ",
"xmltag": "part"
}
},
{
"class": "Quote",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Division",
"Gap",
"Linebreak",
"Paragraph",
"Quote",
"Sentence",
"String",
"TextContent",
"Utterance",
"Whitespace",
"Word",
"Hiddenword",
"Reference"
],
"annotationtype": "QUOTE",
"label": "Quote",
"xmltag": "quote"
}
},
{
"class": "Reference",
"properties": {
"accepted_data": [
"PhonContent",
"Paragraph",
"Quote",
"Sentence",
"String",
"TextContent",
"Utterance",
"Word",
"Hiddenword",
"Linebreak",
"Whitespace"
],
"annotationtype": "REFERENCE",
"label": "Reference",
"textdelimiter": " ",
"xlink": true,
"xmltag": "ref"
}
},
{
"class": "Row",
"properties": {
"accepted_data": [
"Cell",
"AbstractInlineAnnotation"
],
"label": "Table Row",
"textdelimiter": "\n",
"xmltag": "row"
}
},
{
"class": "Sentence",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Entry",
"Event",
"Example",
"Gap",
"Linebreak",
"Note",
"PhonContent",
"Quote",
"Reference",
"String",
"TextContent",
"Whitespace",
"Word",
"Hiddenword"
],
"annotationtype": "SENTENCE",
"label": "Sentence",
"textdelimiter": " ",
"xmltag": "s"
}
},
{
"class": "Speech",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Division",
"Entry",
"Event",
"Example",
"External",
"Gap",
"List",
"Note",
"Paragraph",
"PhonContent",
"Quote",
"Reference",
"Sentence",
"String",
"TextContent",
"Utterance",
"Word",
"Hiddenword"
],
"label": "Speech Body",
"optional_attribs": [
"ID",
"ANNOTATOR",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"METADATA",
"SPACE",
"TAG"
],
"textdelimiter": "\n\n\n",
"xmltag": "speech"
}
},
{
"class": "Table",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Row",
"TableHead",
"Linebreak"
],
"annotationtype": "TABLE",
"label": "Table",
"xmltag": "table"
}
},
{
"class": "TableHead",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Row"
],
"label": "Table Header",
"optional_attribs": [
"ID",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"METADATA",
"TAG"
],
"xmltag": "tablehead"
}
},
{
"class": "Term",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Event",
"Figure",
"Gap",
"List",
"Paragraph",
"PhonContent",
"Reference",
"Sentence",
"String",
"Table",
"TextContent",
"Utterance",
"Word",
"Hiddenword",
"Linebreak",
"Whitespace"
],
"annotationtype": "TERM",
"label": "Term",
"xmltag": "term"
}
},
{
"class": "Text",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Division",
"Entry",
"Event",
"Example",
"External",
"Figure",
"Gap",
"List",
"Note",
"Paragraph",
"PhonContent",
"Quote",
"Reference",
"Sentence",
"String",
"Table",
"TextContent",
"Word",
"Hiddenword",
"Linebreak",
"Whitespace"
],
"label": "Text Body",
"optional_attribs": [
"ID",
"ANNOTATOR",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"METADATA",
"SPACE",
"TAG"
],
"textdelimiter": "\n\n\n",
"xmltag": "text"
}
},
{
"class": "Utterance",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Gap",
"Note",
"PhonContent",
"Quote",
"Reference",
"Sentence",
"String",
"TextContent",
"Word",
"Hiddenword"
],
"annotationtype": "UTTERANCE",
"label": "Utterance",
"textdelimiter": " ",
"xmltag": "utt"
}
},
{
"class": "Whitespace",
"properties": {
"annotationtype": "WHITESPACE",
"label": "Whitespace",
"textdelimiter": "",
"xmltag": "whitespace"
}
},
{
"class": "Word",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"PhonContent",
"Reference",
"String",
"TextContent"
],
"annotationtype": "TOKEN",
"label": "Word/Token",
"optional_attribs": [
"ID",
"CLASS",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"TEXTCLASS",
"METADATA",
"SPACE",
"TAG"
],
"textdelimiter": " ",
"wrefable": true,
"xmltag": "w"
}
}
],
"properties": {
"accepted_data": [
"AbstractAnnotationLayer",
"External",
"Relation",
"Alternative",
"AlternativeLayers",
"Correction",
"Feature",
"Metric",
"Part",
"ForeignData"
],
"auto_generate_id": true,
"optional_attribs": [
"ID",
"CLASS",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"METADATA",
"SPACE",
"TAG"
],
"printable": true,
"required_attribs": null,
"speakable": true,
"textdelimiter": "\n\n"
}
},
{
"class": "AbstractSubtokenAnnotation",
"elements": [
{
"class": "Morpheme",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"FunctionFeature",
"Morpheme",
"PhonContent",
"String",
"TextContent"
],
"annotationtype": "MORPHOLOGICAL",
"label": "Morpheme",
"textdelimiter": "",
"wrefable": true,
"xmltag": "morpheme"
}
},
{
"class": "Phoneme",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"FunctionFeature",
"PhonContent",
"Phoneme",
"String",
"TextContent"
],
"annotationtype": "PHONOLOGICAL",
"label": "Phoneme",
"textdelimiter": "",
"wrefable": true,
"xmltag": "phoneme"
}
}
],
"properties": {
"accepted_data": [
"AbstractAnnotationLayer",
"Relation",
"Alternative",
"AlternativeLayers",
"Correction",
"Feature",
"Metric",
"Part",
"ForeignData"
],
"auto_generate_id": true,
"optional_attribs": [
"ID",
"CLASS",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"METADATA",
"TAG"
],
"printable": true,
"required_attribs": null,
"speakable": true,
"textdelimiter": "\n\n"
}
},
{
"class": "AbstractTextMarkup",
"elements": [
{
"class": "TextMarkupCorrection",
"properties": {
"annotationtype": "CORRECTION",
"primaryelement": false,
"xmltag": "t-correction"
}
},
{
"class": "TextMarkupError",
"properties": {
"annotationtype": "ERRORDETECTION",
"primaryelement": false,
"xmltag": "t-error"
}
},
{
"class": "TextMarkupGap",
"properties": {
"annotationtype": "GAP",
"primaryelement": false,
"xmltag": "t-gap"
}
},
{
"class": "TextMarkupString",
"properties": {
"annotationtype": "STRING",
"primaryelement": false,
"xmltag": "t-str"
}
},
{
"class": "TextMarkupStyle",
"properties": {
"accepted_data": [
"FontFeature",
"SizeFeature"
],
"annotationtype": "STYLE",
"primaryelement": true,
"xmltag": "t-style"
}
},
{
"class": "Hyphbreak",
"properties": {
"annotationtype": "HYPHENATION",
"label": "Hyphbreak",
"textdelimiter": "",
"xmltag": "t-hbr"
}
},
{
"class": "TextMarkupReference",
"properties": {
"annotationtype": "REFERENCE",
"primaryelement": false,
"xmltag": "t-ref"
}
},
{
"class": "TextMarkupWhitespace",
"properties": {
"annotationtype": "WHITESPACE",
"primaryelement": false,
"textdelimiter": "",
"xmltag": "t-whitespace"
}
},
{
"class": "TextMarkupHSpace",
"properties": {
"annotationtype": "HSPACE",
"textdelimiter": "",
"xmltag": "t-hspace"
}
},
{
"class": "TextMarkupLanguage",
"properties": {
"annotationtype": "LANG",
"primaryelement": false,
"xmltag": "t-lang"
}
}
],
"properties": {
"accepted_data": [
"AbstractTextMarkup",
"Linebreak",
"Feature"
],
"optional_attribs": [
"ID",
"CLASS",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"METADATA",
"TAG"
],
"primaryelement": false,
"printable": true,
"textcontainer": true,
"textdelimiter": "",
"xlink": true
}
},
{
"class": "AbstractInlineAnnotation",
"elements": [
{
"class": "DomainAnnotation",
"properties": {
"annotationtype": "DOMAIN",
"label": "Domain",
"occurrences_per_set": 0,
"xmltag": "domain"
}
},
{
"class": "ErrorDetection",
"properties": {
"annotationtype": "ERRORDETECTION",
"label": "Error Detection",
"occurrences_per_set": 0,
"xmltag": "errordetection"
}
},
{
"class": "LangAnnotation",
"properties": {
"annotationtype": "LANG",
"label": "Language",
"xmltag": "lang"
}
},
{
"class": "LemmaAnnotation",
"properties": {
"annotationtype": "LEMMA",
"label": "Lemma",
"xmltag": "lemma"
}
},
{
"class": "PosAnnotation",
"properties": {
"accepted_data": [
"HeadFeature"
],
"annotationtype": "POS",
"label": "Part-of-Speech",
"xmltag": "pos"
}
},
{
"class": "SenseAnnotation",
"properties": {
"accepted_data": [
"SynsetFeature"
],
"annotationtype": "SENSE",
"label": "Semantic Sense",
"occurrences_per_set": 0,
"xmltag": "sense"
}
},
{
"class": "SubjectivityAnnotation",
"properties": {
"annotationtype": "SUBJECTIVITY",
"label": "Subjectivity/Sentiment",
"xmltag": "subjectivity"
}
}
],
"properties": {
"accepted_data": [
"Feature",
"Metric",
"ForeignData"
],
"occurrences_per_set": 1,
"optional_attribs": [
"ID",
"CLASS",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"TEXTCLASS",
"METADATA",
"TAG"
],
"required_attribs": [
"CLASS"
]
}
},
{
"class": "AbstractHigherOrderAnnotation",
"elements": [
{
"class": "Relation",
"properties": {
"accepted_data": [
"LinkReference",
"Metric",
"Feature",
"ForeignData"
],
"annotationtype": "RELATION",
"label": "Relation",
"optional_attribs": [
"ID",
"CLASS",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"METADATA",
"TAG"
],
"printable": false,
"required_attribs": null,
"speakable": false,
"xlink": true,
"xmltag": "relation"
}
},
{
"class": "Alternative",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Correction",
"ForeignData",
"MorphologyLayer",
"PhonologyLayer"
],
"annotationtype": "ALTERNATIVE",
"auth": false,
"label": "Alternative",
"optional_attribs": [
"ID",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"METADATA",
"TAG"
],
"printable": false,
"required_attribs": null,
"speakable": false,
"xmltag": "alt"
}
},
{
"class": "AlternativeLayers",
"properties": {
"accepted_data": [
"AbstractAnnotationLayer",
"ForeignData"
],
"annotationtype": "ALTERNATIVE",
"auth": false,
"label": "Alternative Layers",
"optional_attribs": [
"ID",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"METADATA",
"TAG"
],
"primaryelement": false,
"printable": false,
"required_attribs": null,
"speakable": false,
"xmltag": "altlayers"
}
},
{
"class": "SpanRelation",
"properties": {
"accepted_data": [
"Relation",
"Metric",
"Feature",
"ForeignData"
],
"annotationtype": "SPANRELATION",
"label": "Span Relation",
"optional_attribs": [
"ID",
"CLASS",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"METADATA",
"TAG"
],
"printable": false,
"required_attribs": null,
"speakable": false,
"xmltag": "spanrelation"
}
},
{
"class": "Correction",
"properties": {
"accepted_data": [
"New",
"Original",
"Current",
"Suggestion",
"ErrorDetection",
"Metric",
"Feature",
"ForeignData"
],
"annotationtype": "CORRECTION",
"label": "Correction",
"optional_attribs": [
"ID",
"CLASS",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"METADATA",
"TAG"
],
"printable": true,
"speakable": true,
"textdelimiter": null,
"xmltag": "correction"
}
},
{
"class": "Comment",
"properties": {
"annotationtype": "COMMENT",
"label": "Comment",
"optional_attribs": [
"ID",
"ANNOTATOR",
"CONFIDENCE",
"DATETIME",
"N",
"METADATA",
"TAG"
],
"printable": false,
"xmltag": "comment"
}
},
{
"class": "Description",
"properties": {
"annotationtype": "DESCRIPTION",
"label": "Description",
"occurrences": 1,
"optional_attribs": [
"ID",
"ANNOTATOR",
"CONFIDENCE",
"DATETIME",
"N",
"METADATA",
"TAG"
],
"xmltag": "desc"
}
},
{
"class": "External",
"properties": {
"accepted_data": null,
"annotationtype": "EXTERNAL",
"label": "External",
"optional_attribs": [
"ID",
"ANNOTATOR",
"CONFIDENCE",
"DATETIME",
"N",
"METADATA",
"BEGINTIME",
"ENDTIME",
"TAG"
],
"printable": true,
"required_attribs": [
"SRC"
],
"speakable": true,
"xmltag": "external"
}
},
{
"class": "Feature",
"elements": [
{
"class": "ActorFeature",
"properties": {
"subset": "actor",
"xmltag": null
}
},
{
"class": "BegindatetimeFeature",
"properties": {
"subset": "begindatetime",
"xmltag": null
}
},
{
"class": "EnddatetimeFeature",
"properties": {
"subset": "enddatetime",
"xmltag": null
}
},
{
"class": "FunctionFeature",
"properties": {
"subset": "function",
"xmltag": null
}
},
{
"class": "HeadFeature",
"properties": {
"subset": "head",
"xmltag": null
}
},
{
"class": "LevelFeature",
"properties": {
"subset": "level",
"xmltag": null
}
},
{
"class": "ModalityFeature",
"properties": {
"subset": "mod",
"xmltag": null
}
},
{
"class": "PolarityFeature",
"properties": {
"subset": "polarity",
"xmltag": null
}
},
{
"class": "StrengthFeature",
"properties": {
"subset": "strength",
"xmltag": null
}
},
{
"class": "StyleFeature",
"properties": {
"subset": "style",
"xmltag": null
}
},
{
"class": "SynsetFeature",
"properties": {
"subset": "synset",
"xmltag": null
}
},
{
"class": "TimeFeature",
"properties": {
"subset": "time",
"xmltag": null
}
},
{
"class": "ValueFeature",
"properties": {
"subset": "value",
"xmltag": null
}
},
{
"class": "FontFeature",
"properties": {
"subset": "font",
"xmltag": null
}
},
{
"class": "SizeFeature",
"properties": {
"subset": "size",
"xmltag": null
}
}
],
"properties": {
"label": "Feature",
"xmltag": "feat"
}
},
{
"class": "Metric",
"properties": {
"accepted_data": [
"Feature",
"ValueFeature",
"ForeignData"
],
"annotationtype": "METRIC",
"label": "Metric",
"optional_attribs": [
"ID",
"CLASS",
"ANNOTATOR",
"N",
"CONFIDENCE",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"SPEAKER",
"METADATA",
"TAG"
],
"xmltag": "metric"
}
},
{
"class": "String",
"properties": {
"accepted_data": [
"AbstractInlineAnnotation",
"Relation",
"Correction",
"Feature",
"ForeignData",
"Metric",
"PhonContent",
"TextContent"
],
"annotationtype": "STRING",
"label": "String",
"occurrences": 0,
"optional_attribs": [
"ID",
"CLASS",
"ANNOTATOR",
"CONFIDENCE",
"DATETIME",
"N",
"SRC",
"BEGINTIME",
"ENDTIME",
"METADATA",
"TAG"
],
"printable": true,
"xmltag": "str"
}
},
{
"class": "ForeignData",
"properties": {
"xmltag": "foreign-data"
}
},
{
"class": "Gap",
"properties": {
"accepted_data": [
"Content",
"Feature",
"Metric",
"Part",
"ForeignData"
],
"annotationtype": "GAP",
"label": "Gap",
"optional_attribs": [
"ID",
"CLASS",
"ANNOTATOR",
"N",
"DATETIME",
"SRC",
"BEGINTIME",
"ENDTIME",
"METADATA",
"TAG"
],
"xmltag": "gap"
}
}
],
"properties": {
"optional_attribs": null,
"required_attribs": null
}
},
{
"class": "AbstractContentAnnotation",
"elements": [
{
"class": "TextContent",
"properties": {
"accepted_data": [
"AbstractTextMarkup",
"Linebreak",
"Feature"
],
"annotationtype": "TEXT",
"label": "Text",
"printable": true,
"speakable": false,
"textcontainer": true,
"xlink": true,
"xmltag": "t"
}
},
{
"class": "PhonContent",
"properties": {
"accepted_data": [
"Feature"
],
"annotationtype": "PHON",
"label": "Phonetic Content",
"phoncontainer": true,
"printable": false,
"speakable": true,
"xmltag": "ph"
}
},
{
"class": "Content",
"properties": {
"annotationtype": "RAWCONTENT",
"label": "Raw Content",
"occurrences": 1,
"printable": true,
"xmltag": "content"
}
}
],
"properties": {
"occurrences": 0,
"optional_attribs": [
"CLASS",
"ANNOTATOR",
"CONFIDENCE",
"DATETIME",
"METADATA",
"TAG"
],
"required_attribs": null
}
},
{
"class": "WordReference",
"properties": {
"optional_attribs": [
"IDREF",
"TAG"
],
"xmltag": "wref"
}
},
{
"class": "LinkReference",
"properties": {
"optional_attribs": [
"IDREF",
"TAG"
],
"xmltag": "xref"
}
}
],
"namespace": "http://ilk.uvt.nl/folia",
"oldtags": {
"alignment": "relation",
"aref": "xref",
"complexalignment": "spanrelation",
"complexalignments": "spanrelations",
"listitem": "item"
},
"setdefinitionnamespace": "http://folia.science.ru.nl/setdefinition",
"structurescope": [
"Sentence",
"Paragraph",
"Division",
"ListItem",
"Text",
"Event",
"Caption",
"Head"
],
"version": "2.5.0",
"wrefables": [
"Word",
"Hiddenword",
"Morpheme",
"Phoneme"
]
}; | PypiClean |
/GPGO-0.1.2.tar.gz/GPGO-0.1.2/README.md | # GPGO - Gaussian Process GO
My own implementation of Bayesian black-box optimization with a Gaussian Process as the surrogate model.
It is still in development, as I'm using it for my Master's thesis to achieve a bottom-up optimization of the Dissipative
Particle Dynamics force field for a complex system of polymer-chain-functionalized gold nanoparticles in a water solvent.
# Hyperparameters
The hyperparameters of the GP are optimized by the common technique of maximizing the Log Marginal Likelihood. In this repository this is achieved either with a search grid (although not in an efficient way) or with the scipy optimizers (L-BFGS-B, TNC, SLSQP).
The analytical gradient is implemented for the Radial Basis Function kernel, so the derivative of the Log Marginal Likelihood can be used to optimize the hyperparameters.
<a href="https://ibb.co/D8yvW3x"><img src="https://i.ibb.co/pR8MwCt/Figure-6.png" alt="Figure-6" border="0"></a>
# Acquisition function
As it stands, there are two different acquisition functions implemented right now (a brief sketch of EI follows the list):
-Expected Improvement (EI)
-UCB (Upper Confidence Bound)
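For reference, a standard closed form of Expected Improvement for minimization, written with placeholder inputs (the GP posterior mean `mu`, standard deviation `sigma`, and the best observed value `f_best`) rather than this package's exact API, is:
```python
import numpy as np
from scipy.stats import norm

def expected_improvement(mu, sigma, f_best, xi=0.01):
    # EI under a Gaussian posterior; xi is an optional exploration margin.
    sigma = np.maximum(sigma, 1e-12)             # guard against zero predictive variance
    z = (f_best - mu - xi) / sigma
    return (f_best - mu - xi) * norm.cdf(z) + sigma * norm.pdf(z)
```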
# Maximizing the Acquisition function
In this little package right now there are 3 ways to run an optimization task with Gaussian Processes (a minimal sketch follows the figure below):
-NAIVE : aka sampling the acquisition function on a grid of some kind or with quasi-random methods such as LHS (requires the smt package)
-BFGS : optimize the acquisition function with the L-BFGS-B optimizer
-DIRECT : optimize the acquisition function with the DIRECT optimizer (requires the DIRECT Python package)
<a href="https://ibb.co/GPSM0cm"><img src="https://i.ibb.co/f0wN24J/Figure-7.png" alt="Figure-7" border="0"></a>
# TODO
-Tutorials and Examples
-Good code practice maybe
-An integration with LAMMPS using the pyLammps routine
| PypiClean |
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/codemirror/mode/brainfuck/brainfuck.js |
// Brainfuck mode created by Michael Kaminsky https://github.com/mkaminsky11
(function(mod) {
if (typeof exports == "object" && typeof module == "object")
mod(require("../../lib/codemirror"))
else if (typeof define == "function" && define.amd)
define(["../../lib/codemirror"], mod)
else
mod(CodeMirror)
})(function(CodeMirror) {
"use strict"
var reserve = "><+-.,[]".split("");
/*
comments can be either:
placed behind lines
+++ this is a comment
where reserved characters cannot be used
or in a loop
[
this is ok to use [ ] and stuff
]
or preceded by #
*/
CodeMirror.defineMode("brainfuck", function() {
return {
startState: function() {
return {
commentLine: false,
left: 0,
right: 0,
commentLoop: false
}
},
token: function(stream, state) {
if (stream.eatSpace()) return null
if(stream.sol()){
state.commentLine = false;
}
var ch = stream.next().toString();
if(reserve.indexOf(ch) !== -1){
if(state.commentLine === true){
if(stream.eol()){
state.commentLine = false;
}
return "comment";
}
if(ch === "]" || ch === "["){
if(ch === "["){
state.left++;
}
else{
state.right++;
}
return "bracket";
}
else if(ch === "+" || ch === "-"){
return "keyword";
}
else if(ch === "<" || ch === ">"){
return "atom";
}
else if(ch === "." || ch === ","){
return "def";
}
}
else{
state.commentLine = true;
if(stream.eol()){
state.commentLine = false;
}
return "comment";
}
if(stream.eol()){
state.commentLine = false;
}
}
};
});
CodeMirror.defineMIME("text/x-brainfuck","brainfuck")
}); | PypiClean |
/7Wonder-RL-Lib-0.1.1.tar.gz/7Wonder-RL-Lib-0.1.1/docs/_static/js/html5shiv.min.js | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x<style>"+b+"</style>",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="<xyz></xyz>",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); | PypiClean |
/MarkDo-0.3.0.tar.gz/MarkDo-0.3.0/markdo/static/bower/codemirror/mode/htmlembedded/htmlembedded.js | CodeMirror.defineMode("htmlembedded", function(config, parserConfig) {
//config settings
var scriptStartRegex = parserConfig.scriptStartRegex || /^<%/i,
scriptEndRegex = parserConfig.scriptEndRegex || /^%>/i;
//inner modes
var scriptingMode, htmlMixedMode;
//tokenizer when in html mode
function htmlDispatch(stream, state) {
if (stream.match(scriptStartRegex, false)) {
state.token=scriptingDispatch;
return scriptingMode.token(stream, state.scriptState);
}
else
return htmlMixedMode.token(stream, state.htmlState);
}
//tokenizer when in scripting mode
function scriptingDispatch(stream, state) {
if (stream.match(scriptEndRegex, false)) {
state.token=htmlDispatch;
return htmlMixedMode.token(stream, state.htmlState);
}
else
return scriptingMode.token(stream, state.scriptState);
}
return {
startState: function() {
scriptingMode = scriptingMode || CodeMirror.getMode(config, parserConfig.scriptingModeSpec);
htmlMixedMode = htmlMixedMode || CodeMirror.getMode(config, "htmlmixed");
return {
token : parserConfig.startOpen ? scriptingDispatch : htmlDispatch,
htmlState : CodeMirror.startState(htmlMixedMode),
scriptState : CodeMirror.startState(scriptingMode)
};
},
token: function(stream, state) {
return state.token(stream, state);
},
indent: function(state, textAfter) {
if (state.token == htmlDispatch)
return htmlMixedMode.indent(state.htmlState, textAfter);
else if (scriptingMode.indent)
return scriptingMode.indent(state.scriptState, textAfter);
},
copyState: function(state) {
return {
token : state.token,
htmlState : CodeMirror.copyState(htmlMixedMode, state.htmlState),
scriptState : CodeMirror.copyState(scriptingMode, state.scriptState)
};
},
electricChars: "/{}:",
innerMode: function(state) {
if (state.token == scriptingDispatch) return {state: state.scriptState, mode: scriptingMode};
else return {state: state.htmlState, mode: htmlMixedMode};
}
};
}, "htmlmixed");
CodeMirror.defineMIME("application/x-ejs", { name: "htmlembedded", scriptingModeSpec:"javascript"});
CodeMirror.defineMIME("application/x-aspx", { name: "htmlembedded", scriptingModeSpec:"text/x-csharp"});
CodeMirror.defineMIME("application/x-jsp", { name: "htmlembedded", scriptingModeSpec:"text/x-java"});
CodeMirror.defineMIME("application/x-erb", { name: "htmlembedded", scriptingModeSpec:"ruby"}); | PypiClean |
/MD_MTL-0.0.9-py3-none-any.whl/Vampyr_MTL/functions/MTL_Cluster_Least_L21.py | import numpy as np
from .init_opts import init_opts
from numpy import linalg as LA
from tqdm import tqdm
from tqdm import trange
import sys
import time
from scipy.sparse import identity
from scipy import linalg
from scipy.sparse.linalg import spsolve
from scipy.sparse import isspmatrix
class MTL_Cluster_Least_L21:
"""Clustered MTL algorithm with least square regression and L21 penalty
"""
def __init__(self, opts, k, rho1=10, rho2=0.1):
"""Initialization of CMTL function
Args:
            opts (opts): initialization class from opts
k (integer): number of clusters predefined
rho1 (int, optional): [description]. Defaults to 10.
rho2 (float, optional): [description]. Defaults to 0.1.
"""
self.opts = init_opts(opts)
self.rho1 = rho1
self.rho2 = rho2
self.rho_L2 = 0
self.k = k
if hasattr(opts, 'rho_L2'):
            self.rho_L2 = opts.rho_L2
def fit(self, X, Y, **kwargs):
"""Fit with training samples and train
t: task number
n: number of entries
d: data dimension
Args:
X ([np.array]): t x n x d
Y ([np.array]): t x n x 1
"""
if 'rho' in kwargs.keys():
print(kwargs)
self.rho1 = kwargs['rho']
X_new = []
for i in range(len(X)):
X_new.append(np.transpose(X[i]))
X = X_new
self.X = X
self.Y = Y
# transpose to size: t x d x n
self.task_num = len(X)
self.dimension, _ = X[0].shape
self.eta = self.rho2/self.rho1
self.c = self.rho1 * self.eta * (1+self.eta)
funcVal = []
self.XY = [0]* self.task_num
W0_prep = []
for t in range(self.task_num):
self.XY[t] = X[t] @ Y[t]
W0_prep.append(self.XY[t].reshape((-1,1)))
W0_prep = np.hstack(W0_prep)
if hasattr(self.opts,'W0'):
W0=self.opts.W0
elif self.opts.init==2:
W0 = np.zeros((self.dimension, self.task_num))
elif self.opts.init == 0:
W0 =W0_prep
else:
W0 = np.random.normal(0, 1, (self.dimension, self.task_num))
M0 = np.array(identity(self.task_num)) * self.k / self.task_num
# this flag checks if gradient descent only makes significant step
bFlag=0
Wz= W0
Wz_old = W0
Mz = M0.toarray()
Mz_old = M0.toarray()
t = 1
t_old = 0
it = 0
gamma = 1.0
gamma_inc = 2
for it in trange(self.opts.maxIter, file=sys.stdout, desc='outer loop'):
alpha = (t_old - 1)/t
Ws = (1 + alpha) * Wz - alpha * Wz_old
if(isspmatrix(Mz)):
Mz = Mz.toarray()
if(isspmatrix(Mz_old)):
Mz_old = Mz_old.toarray()
Ms = (1 + alpha) * Mz - alpha * Mz_old
# compute function value and gradients of the search point
gWs, gMs, Fs = self.gradVal_eval(Ws, Ms)
in_it = 0
# for in_it in trange(2,file=sys.stdout, leave=False, unit_scale=True, desc='inner loop'):
for in_it in trange(1000,file=sys.stdout, leave=False, unit_scale=True, desc='inner loop'):
Wzp = Ws - gWs/gamma
Mzp, Mzp_Pz, Mzp_DiagSigz = self.singular_projection (Ms - gMs/gamma, self.k)
Fzp = self.funVal_eval(Wzp, Mzp_Pz, Mzp_DiagSigz)
delta_Wzs = Wzp - Ws
delta_Mzs = Mzp - Ms
r_sum = (LA.norm(delta_Wzs)**2 + LA.norm(delta_Mzs)**2)/2
Fzp_gamma = Fs + np.sum(delta_Wzs*gWs) + np.sum(delta_Mzs*gMs) + gamma * r_sum
if (r_sum <=1e-20):
bFlag=1 # this shows that, the gradient step makes little improvement
break
if (Fzp <= Fzp_gamma):
break
else:
gamma = gamma * gamma_inc
Wz_old = Wz
Wz = Wzp
Mz_old = Mz
Mz = Mzp
funcVal.append(Fzp)
if (bFlag):
print('\n The program terminates as the gradient step changes the solution very small.')
break
if (self.opts.tFlag == 0):
if it >= 2:
if (abs(funcVal[-1] - funcVal[-2]) <= self.opts.tol):
break
elif(self.opts.tFlag == 1):
if it >= 2:
if (abs(funcVal[-1] - funcVal[-2]) <= self.opts.tol * funcVal[-2]):
break
elif(self.opts.tFlag == 2):
if (funcVal[-1] <= self.opts.tol):
break
elif(self.opts.tFlag == 3):
if it >= self.opts.maxIter:
break
t_old = t
t = 0.5 * (1 + (1 + 4 * t ** 2) ** 0.5)
self.W = Wzp
self.M = Mzp
self.funcVal = funcVal
def singular_projection (self, Msp, k):
"""Projection of data
Args:
Msp (np.array(np.array)): M matrix with shape: (t, t)
k (int): number of tasks
Returns:
(tuple): tuple containing:
W (np.array(np.array)): weight matrix of shape (d, t)
M_Pz (np.array(np.array)): M matrix of shape (t, t)
M_DiagSigz (np.array(np.array)): diagnolized M matrix optimized by bsa_ihb
"""
        # spectral projection: project the eigenvalues of M onto {0 <= sigma <= 1, sum(sigma) = k}
EValue, EVector = LA.eig(Msp)
idx = EValue.argsort()
EValue = EValue[idx]
EVector = EVector[:,idx]
Pz = np.real(EVector)
diag_EValue = np.real(EValue)
DiagSigz, _, _ = self.bsa_ihb(diag_EValue, k)
Mzp = Pz @ np.diag(DiagSigz) @ Pz.T
Mzp_Pz = Pz
Mzp_DiagSigz = DiagSigz
return Mzp, Mzp_Pz, Mzp_DiagSigz
def bsa_ihb(self, eig_value, k):
"""continuous quadratic knapsack problem solve in linear time
Singular Projection
min 1/2*||x - eig_value||_2^2
s.t. b'\*x = k, 0<= x <= u, b > 0
Args:
eig_value (np.array): eigenvalue of size (d, 1)
k (int): number of clusters
Returns:
(tuple): tuple containing:
x_star (np.array): optimized solution with Newton's Method, shape (d, 1)
t_star (float): intercepts
it (int): iteration
"""
break_flag = 0
b = np.ones(eig_value.shape)
u = np.ones(eig_value.shape)
t_l = eig_value/b
t_u = (eig_value - u)/b
T = np.concatenate((t_l, t_u), axis=0)
t_L = -np.Infinity
t_U = np.Infinity
g_tL = 0.
g_tU = 0.
it = 0
while(len(T)!=0):
it +=1
g_t = 0.
t_hat = np.median(T)
U = t_hat < t_u
M = (t_u <= t_hat) & (t_hat <= t_l)
if np.sum(U):
g_t += np.sum(b[U]*u[U])
if np.sum(M):
g_t += np.sum(b[M]*(eig_value[M]-t_hat*b[M]))
if g_t > k:
t_L = t_hat
T = T[T>t_hat]
g_tL = g_t
elif g_t <k:
t_U = t_hat
T = T[T<t_hat]
g_tU = g_t
else:
t_star = t_hat
break_flag = 1
break
if not break_flag:
eps = g_tU - g_tL
t_star = t_L - (g_tL - k) * (t_U - t_L)/(eps)
est = eig_value-t_star * b
if(np.isnan(est).any()):
est[np.isnan(est)] = 0
x_star = np.minimum(u, np.max(est, 0))
return x_star, t_star, it
def gradVal_eval(self, W, M):
"""Gradient Decent
Args:
W (np.array(np.array)): Weight Matrix with shape (d, t)
M (np.array(np.array)): M matrix shape (t, t)
Returns:
(tuple): tuple containing:
grad_W (np.array(np.array)): gradient matrix of weight, shape (d, t)
grad_M (np.array(np.array)): gradient matrix of M, shape (t, t)
funcval (float): loss
"""
IM = self.eta * identity(self.task_num)+M
# could be sparse matrix to solve
invEtaMWt = linalg.inv(IM) @ W.T
if self.opts.pFlag:
# grad_W = zeros(zeros(W));
# # parfor i = 1:task_num
# # grad_W (i, :) = X{i}*(X{i}' * W(:,i)-Y{i})
pass
else:
grad_W = []
for i in range(self.task_num):
XWi = self.X[i].T @ W[:,i]
XTXWi = self.X[i] @ XWi
grad_W.append((XTXWi - self.XY[i]).reshape(-1,1))
grad_W = np.hstack(grad_W)
grad_W = grad_W + 2 * self.c * invEtaMWt.T
W2 = W.T @ W
        grad_M = - self.c * W2 @ linalg.inv(IM) @ linalg.inv(IM)
funcVal = 0
if self.opts.pFlag:
pass
else:
for i in range(self.task_num):
funcVal = funcVal + 0.5 * LA.norm ((self.Y[i] - self.X[i].T @ W[:, i]), ord=2)**2
funcVal = funcVal + self.c * np.trace( W @ invEtaMWt)
return grad_W, grad_M, funcVal
def funVal_eval(self, W, M_Pz, M_DiagSigz):
"""Loss accumulation
Args:
W (np.array(np.array)): weight matrix of shape (d, t)
M_Pz (np.array(np.array)): M matrix of shape (t, t)
M_DiagSigz (np.array(np.array)): diagnolized M matrix optimized by bsa_ihb
Returns:
(float): loss
"""
invIM = M_Pz @ (np.diag(1/(self.eta + np.array(M_DiagSigz)))) @ M_Pz.T
invEtaMWt = invIM @ W.T
funcVal = 0
if self.opts.pFlag:
# parfor i = 1: task_num
# # funcVal = funcVal + 0.5 * norm (Y{i} - X{i}' * W(:, i))^2;
# # end
pass
else:
for i in range(self.task_num):
funcVal = funcVal + 0.5 * LA.norm ((self.Y[i] - self.X[i].T @ W[:, i]), ord=2)**2
funcVal = funcVal + self.c * np.trace(W @ invEtaMWt)
return funcVal
def get_params(self, deep = False):
"""Get inbult initalization params
Args:
deep (bool, optional): deep traverse. Defaults to False.
Returns:
(dict): dictionary of all inits
"""
return {'rho1':self.rho1, 'rho2':self.rho2,'opts':self.opts, 'k':self.k}
def get_weights(self):
"""Get weight matrix
Returns:
(np.array(np.array)): Weight matrix
"""
return self.W
def analyse(self):
"""Analyse weight matrix cross clusters with correlation
Returns:
(np.array(np.array)): correlation map
"""
# returns correlation matrix
kmCMTL_OrderedModel = np.zeros(self.W.shape)
clus_task_num = self.task_num//self.k
for i in range(self.k):
clusModel = self.W[:, i:self.task_num:self.k]
kmCMTL_OrderedModel[:, (i)*clus_task_num: (i+1)* clus_task_num] = clusModel
return 1-np.corrcoef(kmCMTL_OrderedModel) | PypiClean |
/NeuroTorch-0.0.1b2.tar.gz/NeuroTorch-0.0.1b2/src/neurotorch/callbacks/convergence.py | import time
from typing import Optional
import numpy as np
from .base_callback import BaseCallback
class ConvergenceTimeGetter(BaseCallback):
"""
Monitor the training process and return the time it took to pass the threshold.
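    A minimal wiring sketch (the metric name and threshold are illustrative
    assumptions; the callback is passed to a NeuroTorch trainer like any other):
        cb = ConvergenceTimeGetter(metric="val_loss", threshold=0.05, minimize_metric=True)
        # ... run training with `cb` in the trainer's callback list ...
        print(cb.time_convergence, cb.itr_convergence)  # seconds elapsed / iteration when the threshold was first met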
"""
def __init__(
self,
*,
metric: str,
threshold: float,
minimize_metric: bool,
**kwargs
):
"""
Constructor for ConvergenceTimeGetter class.
:param metric: Name of the metric to monitor.
:type metric: str
:param threshold: Threshold value for the metric.
:type threshold: float
:param minimize_metric: Whether to minimize or maximize the metric.
:type minimize_metric: bool
:param kwargs: The keyword arguments to pass to the BaseCallback.
"""
super().__init__(**kwargs)
self.threshold = threshold
self.metric = metric
self.minimize_metric = minimize_metric
self.threshold_met = False
self.time_convergence = np.inf
self.itr_convergence = np.inf
self.training_time = np.inf
self.start_time = None
def load_checkpoint_state(self, trainer, checkpoint: dict, **kwargs):
if self.save_state:
state = checkpoint.get(self.name, {})
if state.get("threshold") == self.threshold and state.get("metric") == self.metric:
super().load_checkpoint_state(trainer, checkpoint)
self.start_time = time.time()
if np.isfinite(state.get("time_convergence")):
self.start_time -= state.get("training_time", 0)
# TODO: change start time and add training time, etc.
def start(self, trainer, **kwargs):
super().start(trainer)
self.start_time = time.time()
def close(self, trainer, **kwargs):
self.training_time = time.time() - self.start_time
def on_iteration_end(self, trainer, **kwargs):
if not self.threshold_met:
if self.minimize_metric:
self.threshold_met = trainer.current_training_state.itr_metrics[self.metric] < self.threshold
else:
self.threshold_met = trainer.current_training_state.itr_metrics[self.metric] > self.threshold
if self.threshold_met:
self.time_convergence = time.time() - self.start_time
self.itr_convergence = trainer.current_training_state.iteration
def __repr__(self):
repr_str = f"ConvergenceTimeGetter("
repr_str += f"metric={self.metric}, "
repr_str += f"threshold={self.threshold}, "
repr_str += f"minimize_metric={self.minimize_metric})"
repr_str += f"<time_convergence={self.time_convergence} [s], "
repr_str += f"itr_convergence={self.itr_convergence}, "
repr_str += f"training_time={self.training_time} [s]>"
return repr_str | PypiClean |
/ChemDataExtractor-1.3.0-py3-none-any.whl/chemdataextractor/nlp/lexicon.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import six
from ..data import load_model
from ..text import word_shape, is_ascii, is_punct, like_url, like_number
from ..text.normalize import Normalizer, ChemNormalizer
from ..utils import Singleton
log = logging.getLogger(__name__)
class Lexeme(object):
""""""
__slots__ = ('text', 'normalized', 'lower', 'first', 'suffix', 'shape', 'length', 'upper_count', 'lower_count',
'digit_count', 'is_alpha', 'is_ascii', 'is_digit', 'is_lower', 'is_upper', 'is_title', 'is_punct',
'is_hyphenated', 'like_url', 'like_number', 'cluster')
def __init__(self, text, normalized, lower, first, suffix, shape, length, upper_count, lower_count, digit_count,
is_alpha, is_ascii, is_digit, is_lower, is_upper, is_title, is_punct, is_hyphenated, like_url,
like_number, cluster):
#: Original Lexeme text.
self.text = text
#: The Brown Word Cluster for this Lexeme.
self.cluster = cluster
#: Normalized text, using the Lexicon Normalizer.
self.normalized = normalized
#: Lowercase text.
self.lower = lower
#: First character.
self.first = first
#: Three-character suffix
self.suffix = suffix
#: Word shape. Derived by replacing every number with `d', every greek letter with `g', and every latin letter with `X' or `x' for uppercase and lowercase respectively.
self.shape = shape
#: Lexeme length.
self.length = length
#: Count of uppercase characters.
self.upper_count = upper_count
#: Count of lowercase characters.
self.lower_count = lower_count
#: Count of digits.
self.digit_count = digit_count
#: Whether the text is entirely alphabetical characters.
self.is_alpha = is_alpha
#: Whether the text is entirely ASCII characters.
self.is_ascii = is_ascii
#: Whether the text is entirely digits.
self.is_digit = is_digit
#: Whether the text is entirely lowercase.
self.is_lower = is_lower
#: Whether the text is entirely uppercase.
self.is_upper = is_upper
#: Whether the text is title cased.
self.is_title = is_title
#: Whether the text is entirely punctuation characters.
self.is_punct = is_punct
#: Whether the text is hyphenated.
self.is_hyphenated = is_hyphenated
#: Whether the text looks like a URL.
self.like_url = like_url
#: Whether the text looks like a number.
self.like_number = like_number
class Lexicon(six.with_metaclass(Singleton)):
""""""
#: The Normalizer for this Lexicon.
normalizer = Normalizer()
#: Path to the Brown clusters model file for this Lexicon.
clusters_path = None
def __init__(self):
""""""
self.lexemes = {}
self.clusters = {}
self._loaded_clusters = False
def __len__(self):
"""The current number of lexemes stored."""
return len(self.lexemes)
def add(self, text):
"""Add text to the lexicon.
:param string text: The text to add.
"""
# logging.debug('Adding to lexicon: %s' % text)
if text not in self.lexemes:
normalized = self.normalized(text)
self.lexemes[text] = Lexeme(
text=text,
normalized=normalized,
lower=self.lower(normalized),
first=self.first(normalized),
suffix=self.suffix(normalized),
shape=self.shape(normalized),
length=self.length(normalized),
upper_count=self.upper_count(normalized),
lower_count=self.lower_count(normalized),
digit_count=self.digit_count(normalized),
is_alpha=self.is_alpha(normalized),
is_ascii=self.is_ascii(normalized),
is_digit=self.is_digit(normalized),
is_lower=self.is_lower(normalized),
is_upper=self.is_upper(normalized),
is_title=self.is_title(normalized),
is_punct=self.is_punct(normalized),
is_hyphenated=self.is_hyphenated(normalized),
like_url=self.like_url(normalized),
like_number=self.like_number(normalized),
cluster=self.cluster(normalized)
)
def __getitem__(self, text):
"""Return the requested lexeme from the Lexicon.
:param string text: Text of the lexeme to retrieve.
:rtype: Lexeme
:returns: The requested Lexeme.
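        A brief illustrative example (attribute values depend on the configured
        normalizer and cluster model, so they are not shown here):
            lexicon = Lexicon()
            lexeme = lexicon['Ethanol']
            lexeme.shape, lexeme.is_title, lexeme.length  # derived features of the token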
"""
self.add(text)
return self.lexemes[text]
def cluster(self, text):
""""""
if not self._loaded_clusters and self.clusters_path:
self.clusters = load_model(self.clusters_path)
self._loaded_clusters = True
return self.clusters.get(text, None)
def normalized(self, text):
""""""
return self.normalizer(text)
def lower(self, text):
""""""
return text.lower()
def first(self, text):
""""""
return text[:1]
def suffix(self, text):
""""""
return text[-3:]
def shape(self, text):
""""""
return word_shape(text)
def length(self, text):
""""""
return len(text)
def digit_count(self, text):
""""""
return sum(c.isdigit() for c in text)
def upper_count(self, text):
""""""
return sum(c.isupper() for c in text)
def lower_count(self, text):
""""""
return sum(c.islower() for c in text)
def is_alpha(self, text):
""""""
return text.isalpha()
def is_ascii(self, text):
""""""
return is_ascii(text)
def is_digit(self, text):
""""""
return text.isdigit()
def is_lower(self, text):
""""""
return text.islower()
def is_upper(self, text):
""""""
return text.isupper()
def is_title(self, text):
""""""
return text.istitle()
def is_punct(self, text):
""""""
return is_punct(text)
def is_hyphenated(self, text):
""""""
# TODO: What about '--'?
return '-' in text and not text == '-'
def like_url(self, text):
""""""
return like_url(text)
def like_number(self, text):
""""""
return like_number(text)
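# Illustrative usage sketch (not part of the library): indexing a Lexicon adds the
# text on first access and returns the cached Lexeme with its derived features.
# >>> lexicon = Lexicon()
# >>> lexeme = lexicon['Benzene']
# >>> lexeme.shape, lexeme.is_title, lexeme.length
# The exact feature values depend on the configured Normalizer.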
class ChemLexicon(Lexicon):
"""A Lexicon that is pre-configured with a Chemistry-aware Normalizer and Brown word clusters derived from a
chemistry corpus."""
normalizer = ChemNormalizer()
clusters_path = 'models/clusters_chem1500-1.0.pickle' | PypiClean |
/EnergyCapSdk-8.2304.4743.tar.gz/EnergyCapSdk-8.2304.4743/energycap/sdk/models/system_settings_response_py3.py |
from msrest.serialization import Model
class SystemSettingsResponse(Model):
"""SystemSettingsResponse.
:param organization_name: The organization name
:type organization_name: str
:param display_organization_name_in_header: Indicates whether or not
organization name should be displayed in the application header
:type display_organization_name_in_header: bool
:param organization_logo: The organization logo
May be a base64-encoded PNG, JPG, or SVG image or a URI for an image
hosted online
A null value will clear the saved image
:type organization_logo: str
:param automatic_logout_minutes: Number of minutes after which users will
be automatically logged out
:type automatic_logout_minutes: int
:param default_country: Default country for new users
:type default_country: str
:param default_meter_time_zone:
:type default_meter_time_zone: ~energycap.sdk.models.TimeZoneResponse
:param default_date_format: Default date format for new users
:type default_date_format: str
:param default_topmost_place:
:type default_topmost_place: ~energycap.sdk.models.PlaceChild
:param default_cost_center:
:type default_cost_center: ~energycap.sdk.models.CostCenterChild
:param default_user_role:
:type default_user_role: ~energycap.sdk.models.SystemUserRoleChild
:param months_to_exclude_from_charts: Number of months to exclude from
powerview charts
:type months_to_exclude_from_charts: int
"""
_attribute_map = {
'organization_name': {'key': 'organizationName', 'type': 'str'},
'display_organization_name_in_header': {'key': 'displayOrganizationNameInHeader', 'type': 'bool'},
'organization_logo': {'key': 'organizationLogo', 'type': 'str'},
'automatic_logout_minutes': {'key': 'automaticLogoutMinutes', 'type': 'int'},
'default_country': {'key': 'defaultCountry', 'type': 'str'},
'default_meter_time_zone': {'key': 'defaultMeterTimeZone', 'type': 'TimeZoneResponse'},
'default_date_format': {'key': 'defaultDateFormat', 'type': 'str'},
'default_topmost_place': {'key': 'defaultTopmostPlace', 'type': 'PlaceChild'},
'default_cost_center': {'key': 'defaultCostCenter', 'type': 'CostCenterChild'},
'default_user_role': {'key': 'defaultUserRole', 'type': 'SystemUserRoleChild'},
'months_to_exclude_from_charts': {'key': 'monthsToExcludeFromCharts', 'type': 'int'},
}
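    # Illustrative note: like other msrest models, instances are constructed with keyword
    # arguments named as in the docstring above, e.g.
    # SystemSettingsResponse(organization_name='Acme Corp', automatic_logout_minutes=30);
    # the 'key' entries in _attribute_map are the camelCase names used in the serialized JSON.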
def __init__(self, *, organization_name: str=None, display_organization_name_in_header: bool=None, organization_logo: str=None, automatic_logout_minutes: int=None, default_country: str=None, default_meter_time_zone=None, default_date_format: str=None, default_topmost_place=None, default_cost_center=None, default_user_role=None, months_to_exclude_from_charts: int=None, **kwargs) -> None:
super(SystemSettingsResponse, self).__init__(**kwargs)
self.organization_name = organization_name
self.display_organization_name_in_header = display_organization_name_in_header
self.organization_logo = organization_logo
self.automatic_logout_minutes = automatic_logout_minutes
self.default_country = default_country
self.default_meter_time_zone = default_meter_time_zone
self.default_date_format = default_date_format
self.default_topmost_place = default_topmost_place
self.default_cost_center = default_cost_center
self.default_user_role = default_user_role
self.months_to_exclude_from_charts = months_to_exclude_from_charts | PypiClean |
/Copreus-0.4.0.tar.gz/Copreus-0.4.0/copreus/schema/drivermanager.py | import copreus.schema.adc
import copreus.schema.bme_280
import copreus.schema.dac
import copreus.schema.dht
import copreus.schema.epaperdirect
import copreus.schema.epapersimple
import copreus.schema.input
import copreus.schema.pollinginput
import copreus.schema.output
import copreus.schema.rotaryencoder
import copreus.schema.rotaryencoder2
import copreus.schema.rfid
import copreus.schema.rgbled
def _add_driver_schema(schema, driver_schema):
driver_schema["driver"]["properties"]["active"] = {
"description": "if set to false, the driver will not be loaded",
"type": "boolean"
}
driver_schema["driver"]["required"].append("name")
driver_schema["driver"]["required"].append("active")
schema["drivers"]["items"]["oneOf"].append(driver_schema["driver"])
def get_schema():
schema = {
"drivers": {
"description": "Drivermanager configuration.",
"type": "array",
"items": {
"oneOf": [
]
},
"additionalItems": False
}
}
_add_driver_schema(schema, copreus.schema.adc.get_schema())
_add_driver_schema(schema, copreus.schema.bme_280.get_schema())
_add_driver_schema(schema, copreus.schema.dac.get_schema())
_add_driver_schema(schema, copreus.schema.dht.get_schema())
_add_driver_schema(schema, copreus.schema.epaperdirect.get_schema())
_add_driver_schema(schema, copreus.schema.epapersimple.get_schema())
_add_driver_schema(schema, copreus.schema.input.get_schema())
_add_driver_schema(schema, copreus.schema.pollinginput.get_schema())
_add_driver_schema(schema, copreus.schema.output.get_schema())
_add_driver_schema(schema, copreus.schema.rotaryencoder.get_schema())
_add_driver_schema(schema, copreus.schema.rotaryencoder2.get_schema())
_add_driver_schema(schema, copreus.schema.rfid.get_schema())
_add_driver_schema(schema, copreus.schema.rgbled.get_schema())
return schema | PypiClean |
/FIRSTBEATLU-0.13.1.tar.gz/FIRSTBEATLU-0.13.1/econml/dml/_rlearner.py | from abc import abstractmethod
import numpy as np
import copy
from warnings import warn
from ..utilities import (shape, reshape, ndim, hstack, filter_none_kwargs, _deprecate_positional)
from sklearn.linear_model import LinearRegression
from sklearn.base import clone
from .._ortho_learner import _OrthoLearner
class _ModelNuisance:
"""
Nuisance model fits the model_y and model_t at fit time and at predict time
calculates the residual Y and residual T based on the fitted models and returns
the residuals as two nuisance parameters.
"""
def __init__(self, model_y, model_t):
self._model_y = model_y
self._model_t = model_t
def fit(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None):
assert Z is None, "Cannot accept instrument!"
self._model_t.fit(X, W, T, **filter_none_kwargs(sample_weight=sample_weight, groups=groups))
self._model_y.fit(X, W, Y, **filter_none_kwargs(sample_weight=sample_weight, groups=groups))
return self
def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None):
if hasattr(self._model_y, 'score'):
# note that groups are not passed to score because they are only used for fitting
Y_score = self._model_y.score(X, W, Y, **filter_none_kwargs(sample_weight=sample_weight))
else:
Y_score = None
if hasattr(self._model_t, 'score'):
# note that groups are not passed to score because they are only used for fitting
T_score = self._model_t.score(X, W, T, **filter_none_kwargs(sample_weight=sample_weight))
else:
T_score = None
return Y_score, T_score
def predict(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None):
Y_pred = self._model_y.predict(X, W)
T_pred = self._model_t.predict(X, W)
if (X is None) and (W is None): # In this case predict above returns a single row
Y_pred = np.tile(Y_pred.reshape(1, -1), (Y.shape[0], 1))
T_pred = np.tile(T_pred.reshape(1, -1), (T.shape[0], 1))
Y_res = Y - Y_pred.reshape(Y.shape)
T_res = T - T_pred.reshape(T.shape)
return Y_res, T_res
class _ModelFinal:
"""
Final model at fit time, fits a residual on residual regression with a heterogeneous coefficient
that depends on X, i.e.
.. math ::
Y - E[Y | X, W] = \\theta(X) \\cdot (T - E[T | X, W]) + \\epsilon
and at predict time returns :math:`\\theta(X)`. The score method returns the MSE of this final
residual on residual regression.
"""
def __init__(self, model_final):
self._model_final = model_final
def fit(self, Y, T, X=None, W=None, Z=None, nuisances=None,
sample_weight=None, freq_weight=None, sample_var=None, groups=None):
Y_res, T_res = nuisances
self._model_final.fit(X, T, T_res, Y_res, sample_weight=sample_weight,
freq_weight=freq_weight, sample_var=sample_var)
return self
def predict(self, X=None):
return self._model_final.predict(X)
def score(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, groups=None):
Y_res, T_res = nuisances
if Y_res.ndim == 1:
Y_res = Y_res.reshape((-1, 1))
if T_res.ndim == 1:
T_res = T_res.reshape((-1, 1))
effects = self._model_final.predict(X).reshape((-1, Y_res.shape[1], T_res.shape[1]))
Y_res_pred = np.einsum('ijk,ik->ij', effects, T_res).reshape(Y_res.shape)
if sample_weight is not None:
return np.mean(np.average((Y_res - Y_res_pred)**2, weights=sample_weight, axis=0))
else:
return np.mean((Y_res - Y_res_pred) ** 2)
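# Rough numeric sketch (illustrative only, not library code) of what the two helper
# classes above compute: the nuisance stage produces the residuals
#     Y_res = Y - E_hat[Y | X, W]   and   T_res = T - E_hat[T | X, W]
# and the final stage fits theta(X) by regressing Y_res on features interacted with
# T_res, e.g. for a single treatment and outcome with a linear theta(X):
#     theta_hat = np.linalg.lstsq(X * T_res[:, None], Y_res, rcond=None)[0]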
class _RLearner(_OrthoLearner):
"""
Base class for CATE learners that residualize treatment and outcome and run residual on residual regression.
The estimator is a special of an :class:`._OrthoLearner` estimator,
so it follows the two
stage process, where a set of nuisance functions are estimated in the first stage in a crossfitting
manner and a final stage estimates the CATE model. See the documentation of
:class:`._OrthoLearner` for a description of this two stage process.
In this estimator, the CATE is estimated by using the following estimating equations:
.. math ::
Y - \\E[Y | X, W] = \\Theta(X) \\cdot (T - \\E[T | X, W]) + \\epsilon
Thus if we estimate the nuisance functions :math:`q(X, W) = \\E[Y | X, W]` and
:math:`f(X, W)=\\E[T | X, W]` in the first stage, we can estimate the final stage cate for each
treatment t, by running a regression, minimizing the residual on residual square loss:
.. math ::
\\hat{\\theta} = \\arg\\min_{\\Theta}\
\\E_n\\left[ (\\tilde{Y} - \\Theta(X) \\cdot \\tilde{T})^2 \\right]
Where :math:`\\tilde{Y}=Y - \\E[Y | X, W]` and :math:`\\tilde{T}=T-\\E[T | X, W]` denotes the
residual outcome and residual treatment.
Parameters
----------
discrete_treatment: bool
Whether the treatment values should be treated as categorical, rather than continuous, quantities
categories: 'auto' or list
The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
The first category will be treated as the control treatment.
cv: int, cross-validation generator or an iterable
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the treatment is discrete
:class:`~sklearn.model_selection.StratifiedKFold` is used, else,
:class:`~sklearn.model_selection.KFold` is used
(with a random shuffle in either case).
Unless an iterable is used, we call `split(concat[W, X], T)` to generate the splits. If all
W, X are None, then we call `split(ones((T.shape[0], 1)), T)`.
random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None
If int, random_state is the seed used by the random number generator;
If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
by :mod:`np.random<numpy.random>`.
mc_iters: int, optional
The number of times to rerun the first stage models to reduce the variance of the nuisances.
mc_agg: {'mean', 'median'}, optional (default='mean')
How to aggregate the nuisance value for each sample across the `mc_iters` monte carlo iterations of
cross-fitting.
Examples
--------
The example code below implements a very simple version of the double machine learning
method on top of the :class:`._RLearner` class, for expository purposes.
For a more elaborate implementation of a Double Machine Learning child class of the class
checkout :class:`.DML` and its child classes:
.. testcode::
import numpy as np
from sklearn.linear_model import LinearRegression
from econml.dml._rlearner import _RLearner
from sklearn.base import clone
class ModelFirst:
def __init__(self, model):
self._model = clone(model, safe=False)
def fit(self, X, W, Y, sample_weight=None):
self._model.fit(np.hstack([X, W]), Y)
return self
def predict(self, X, W):
return self._model.predict(np.hstack([X, W]))
class ModelFinal:
def fit(self, X, T, T_res, Y_res, sample_weight=None, freq_weight=None, sample_var=None):
self.model = LinearRegression(fit_intercept=False).fit(X * T_res.reshape(-1, 1),
Y_res)
return self
def predict(self, X):
return self.model.predict(X)
class RLearner(_RLearner):
def _gen_model_y(self):
return ModelFirst(LinearRegression())
def _gen_model_t(self):
return ModelFirst(LinearRegression())
def _gen_rlearner_model_final(self):
return ModelFinal()
np.random.seed(123)
X = np.random.normal(size=(1000, 3))
y = X[:, 0] + X[:, 1] + np.random.normal(0, 0.01, size=(1000,))
est = RLearner(cv=2, discrete_treatment=False, categories='auto', random_state=None)
est.fit(y, X[:, 0], X=np.ones((X.shape[0], 1)), W=X[:, 1:])
>>> est.const_marginal_effect(np.ones((1,1)))
array([0.999631...])
>>> est.effect(np.ones((1,1)), T0=0, T1=10)
array([9.996314...])
>>> est.score(y, X[:, 0], X=np.ones((X.shape[0], 1)), W=X[:, 1:])
9.73638006...e-05
>>> est.rlearner_model_final_.model
LinearRegression(fit_intercept=False)
>>> est.rlearner_model_final_.model.coef_
array([0.999631...])
>>> est.score_
9.82623204...e-05
>>> [mdl._model for mdls in est.models_y for mdl in mdls]
[LinearRegression(), LinearRegression()]
>>> [mdl._model for mdls in est.models_t for mdl in mdls]
[LinearRegression(), LinearRegression()]
Attributes
----------
models_y: nested list of objects of type(model_y)
A nested list of instances of the model_y object. Number of sublist equals to number of monte carlo
iterations, each element in the sublist corresponds to a crossfitting
fold and is the model instance that was fitted for that training fold.
models_t: nested list of objects of type(model_t)
A nested list of instances of the model_t object. Number of sublist equals to number of monte carlo
iterations, each element in the sublist corresponds to a crossfitting
fold and is the model instance that was fitted for that training fold.
rlearner_model_final_ : object of type(model_final)
An instance of the model_final object that was fitted after calling fit.
    score_ : float
        The MSE in the final residual on residual regression
        .. math::
            \\frac{1}{n} \\sum_{i=1}^n (Y_i - \\hat{E}[Y|X_i, W_i]\
            - \\hat{\\theta}(X_i)\\cdot (T_i - \\hat{E}[T|X_i, W_i]))^2
        If `sample_weight` is not None at fit time, then a weighted average is returned. If the outcome Y
        is multidimensional, then the average of the MSEs for each dimension of Y is returned.
    nuisance_scores_y : nested list of float
        The out-of-sample scores for each outcome model
    nuisance_scores_t : nested list of float
        The out-of-sample scores for each treatment model
"""
def __init__(self, *, discrete_treatment, categories, cv, random_state, mc_iters=None, mc_agg='mean'):
super().__init__(discrete_treatment=discrete_treatment,
discrete_instrument=False, # no instrument, so doesn't matter
categories=categories,
cv=cv,
random_state=random_state,
mc_iters=mc_iters,
mc_agg=mc_agg)
@abstractmethod
def _gen_model_y(self):
"""
Returns
-------
model_y: estimator of E[Y | X, W]
The estimator for fitting the response to the features and controls. Must implement
`fit` and `predict` methods. Unlike sklearn estimators both methods must
take an extra second argument (the controls), i.e. ::
model_y.fit(X, W, Y, sample_weight=sample_weight)
model_y.predict(X, W)
"""
pass
@abstractmethod
def _gen_model_t(self):
"""
Returns
-------
model_t: estimator of E[T | X, W]
The estimator for fitting the treatment to the features and controls. Must implement
`fit` and `predict` methods. Unlike sklearn estimators both methods must
take an extra second argument (the controls), i.e. ::
model_t.fit(X, W, T, sample_weight=sample_weight)
model_t.predict(X, W)
"""
pass
@abstractmethod
def _gen_rlearner_model_final(self):
"""
Returns
-------
model_final: estimator for fitting the response residuals to the features and treatment residuals
Must implement `fit` and `predict` methods. Unlike sklearn estimators the fit methods must
take an extra second argument (the treatment residuals). Predict, on the other hand,
should just take the features and return the constant marginal effect. More, concretely::
model_final.fit(X, T_res, Y_res,
sample_weight=sample_weight, freq_weight=freq_weight, sample_var=sample_var)
model_final.predict(X)
"""
pass
def _gen_ortho_learner_model_nuisance(self):
return _ModelNuisance(self._gen_model_y(), self._gen_model_t())
def _gen_ortho_learner_model_final(self):
return _ModelFinal(self._gen_rlearner_model_final())
def fit(self, Y, T, *, X=None, W=None, sample_weight=None, freq_weight=None, sample_var=None, groups=None,
cache_values=False, inference=None):
"""
Estimate the counterfactual model from data, i.e. estimates function :math:`\\theta(\\cdot)`.
Parameters
----------
Y: (n, d_y) matrix or vector of length n
Outcomes for each sample
T: (n, d_t) matrix or vector of length n
Treatments for each sample
X: optional(n, d_x) matrix or None (Default=None)
Features for each sample
W: optional(n, d_w) matrix or None (Default=None)
Controls for each sample
sample_weight : (n,) array like, default None
Individual weights for each sample. If None, it assumes equal weight.
freq_weight: (n, ) array like of integers, default None
Weight for the observation. Observation i is treated as the mean
outcome of freq_weight[i] independent observations.
When ``sample_var`` is not None, this should be provided.
sample_var : {(n,), (n, d_y)} nd array like, default None
Variance of the outcome(s) of the original freq_weight[i] observations that were used to
compute the mean outcome represented by observation i.
groups: (n,) vector, optional
All rows corresponding to the same group will be kept together during splitting.
If groups is not None, the `cv` argument passed to this class's initializer
must support a 'groups' argument to its split method.
cache_values: bool, default False
Whether to cache inputs and first stage results, which will allow refitting a different final model
inference: string,:class:`.Inference` instance, or None
Method for performing inference. This estimator supports 'bootstrap'
(or an instance of:class:`.BootstrapInference`).
Returns
-------
self: _RLearner instance
"""
# Replacing fit from _OrthoLearner, to enforce Z=None and improve the docstring
return super().fit(Y, T, X=X, W=W,
sample_weight=sample_weight, freq_weight=freq_weight, sample_var=sample_var, groups=groups,
cache_values=cache_values,
inference=inference)
def score(self, Y, T, X=None, W=None, sample_weight=None):
"""
Score the fitted CATE model on a new data set. Generates nuisance parameters
for the new data set based on the fitted residual nuisance models created at fit time.
It uses the mean prediction of the models fitted by the different crossfit folds.
Then calculates the MSE of the final residual Y on residual T regression.
If model_final does not have a score method, then it raises an :exc:`.AttributeError`
Parameters
----------
Y: (n, d_y) matrix or vector of length n
Outcomes for each sample
T: (n, d_t) matrix or vector of length n
Treatments for each sample
X: optional(n, d_x) matrix or None (Default=None)
Features for each sample
W: optional(n, d_w) matrix or None (Default=None)
Controls for each sample
sample_weight: optional(n,) vector or None (Default=None)
Weights for each samples
Returns
-------
score: float
The MSE of the final CATE model on the new data.
"""
# Replacing score from _OrthoLearner, to enforce Z=None and improve the docstring
return super().score(Y, T, X=X, W=W, sample_weight=sample_weight)
@property
def rlearner_model_final_(self):
# NOTE: important to get parent's wrapped copy so that
# after training wrapped featurizer is also trained, etc.
return self.ortho_learner_model_final_._model_final
@property
def models_y(self):
return [[mdl._model_y for mdl in mdls] for mdls in super().models_nuisance_]
@property
def models_t(self):
return [[mdl._model_t for mdl in mdls] for mdls in super().models_nuisance_]
@property
def nuisance_scores_y(self):
return self.nuisance_scores_[0]
@property
def nuisance_scores_t(self):
return self.nuisance_scores_[1]
@property
def residuals_(self):
"""
A tuple (y_res, T_res, X, W), of the residuals from the first stage estimation
along with the associated X and W. Samples are not guaranteed to be in the same
order as the input order.
"""
if not hasattr(self, '_cached_values'):
raise AttributeError("Estimator is not fitted yet!")
if self._cached_values is None:
raise AttributeError("`fit` was called with `cache_values=False`. "
"Set to `True` to enable residual storage.")
Y_res, T_res = self._cached_values.nuisances
return Y_res, T_res, self._cached_values.X, self._cached_values.W | PypiClean |
/FlowMaster-0.7.1.tar.gz/FlowMaster-0.7.1/flowmaster/operators/base/policy.py | import datetime as dt
from abc import ABC
from typing import Optional, Union, Literal, TypeVar
import pendulum
from pydantic import BaseModel, PositiveInt, PrivateAttr, validator
PydanticModelT = TypeVar("PydanticModelT", bound=BaseModel)
class BasePolicy(BaseModel):
pools: Optional[list[str]] = None
concurrency: Optional[int] = None
class BaseNotificationServicePolicy(BaseModel):
on_retry: bool = False
on_success: bool = False
on_failure: bool = True
class _SchedulePolicy(BaseModel):
interval: Union[PositiveInt, Literal["daily", "hourly"]]
timezone: str
start_time: str
from_date: Optional[Union[str, pendulum.DateTime, dt.date, dt.datetime]] = None
period_length: int = 1
keep_sequence: bool = False
_start_datetime: dt.datetime = PrivateAttr()
_is_second_interval: bool = PrivateAttr()
_interval_timedelta: dt.timedelta = PrivateAttr()
@validator("keep_sequence")
def _validate_keep_sequence(cls, keep_sequence, values, **kwargs):
if values.get("from_date") is None and keep_sequence is True:
raise ValueError("keep_sequence cannot exist if from_date is missing")
return keep_sequence
@validator("from_date", pre=True)
def _validate_from_date(
cls, from_date: Optional[Union[str, pendulum.DateTime]], values, **kwargs
) -> Optional[pendulum.DateTime]:
if (
isinstance(from_date, pendulum.DateTime)
and from_date.timezone_name != values["timezone"]
):
from_date = from_date.astimezone(pendulum.timezone(values["timezone"]))
elif isinstance(from_date, str):
from_date = pendulum.parse(from_date, tz=values["timezone"])
elif isinstance(from_date, dt.date):
from_date = pendulum.parse(
from_date.strftime("%Y-%m-%dT%H:%M:%S"), tz=values["timezone"]
)
elif isinstance(from_date, dt.datetime):
from_date = pendulum.instance(from_date, tz=values["timezone"])
if from_date and not isinstance(from_date, pendulum.DateTime):
raise TypeError(f"{from_date=} is type {type(from_date)}")
return from_date
def _set_keep_sequence(self):
if self.from_date is not None:
self.keep_sequence = True
def _set_interval_timedelta(self):
if isinstance(self.interval, int):
self._interval_timedelta = dt.timedelta(seconds=self.interval)
elif self.interval == "daily":
self._interval_timedelta = dt.timedelta(days=1)
elif self.interval == "hourly":
self._interval_timedelta = dt.timedelta(hours=1)
else:
raise NotImplementedError(f"{self.interval} not supported")
def _set_is_second_interval(self):
self._is_second_interval = isinstance(self.interval, int)
def _set_start_datetime(self):
self._start_datetime = pendulum.parse(self.start_time, tz=self.timezone)
if self.from_date is not None:
self._start_datetime = self.from_date.replace(
hour=self._start_datetime.hour,
minute=self._start_datetime.minute,
second=self._start_datetime.second,
)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._set_start_datetime()
self._set_is_second_interval()
self._set_interval_timedelta()
self._set_keep_sequence()
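# Illustrative construction (values are made up): a daily schedule in UTC starting at
# 01:00 that backfills from a fixed date; keep_sequence is switched on automatically
# in __init__ because from_date is provided.
# >>> _SchedulePolicy(interval="daily", timezone="UTC", start_time="01:00",
# ...                 from_date="2021-01-01")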
class BaseNotebook(BaseModel, ABC):
class WorkPolicy(BasePolicy):
class TriggersPolicy(BaseModel):
class SchedulePolicy(_SchedulePolicy):
...
schedule: SchedulePolicy
class NotificationsPolicy(BaseModel):
class CodexTelegramPolicy(BaseNotificationServicePolicy):
links: list[str]
codex_telegram: CodexTelegramPolicy = None
triggers: TriggersPolicy
notifications: Optional[NotificationsPolicy]
retries: int = 0
retry_delay: int = 60
time_limit_seconds_from_worktime: Optional[int] = None
soft_time_limit_seconds: Optional[int] = None
max_fatal_errors: int = 3
name: str
description: Optional[str] = None
work: Optional[WorkPolicy] = None
hash: str = ""
operator: str = "base"
class Config:
# For class NotebooksCollection.
operator = "base"
def __init__(self, **kwargs):
super(BaseNotebook, self).__init__(**kwargs)
assert hasattr(self.Config, "operator")
self.operator = self.Config.operator | PypiClean |
/DJModels-0.0.6-py3-none-any.whl/djmodels/db/backends/mysql/operations.py | import decimal
import uuid
from djmodels.conf import settings
from djmodels.db.backends.base.operations import BaseDatabaseOperations
from djmodels.utils import timezone
from djmodels.utils.duration import duration_microseconds
from djmodels.utils.encoding import force_text
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "djmodels.db.backends.mysql.compiler"
# MySQL stores positive fields as UNSIGNED ints.
integer_field_ranges = {
**BaseDatabaseOperations.integer_field_ranges,
'PositiveSmallIntegerField': (0, 65535),
'PositiveIntegerField': (0, 4294967295),
}
cast_data_types = {
'AutoField': 'signed integer',
'BigAutoField': 'signed integer',
'CharField': 'char(%(max_length)s)',
'TextField': 'char',
'IntegerField': 'signed integer',
'BigIntegerField': 'signed integer',
'SmallIntegerField': 'signed integer',
'PositiveIntegerField': 'unsigned integer',
'PositiveSmallIntegerField': 'unsigned integer',
}
cast_char_field_without_max_length = 'char'
explain_prefix = 'EXPLAIN'
def date_extract_sql(self, lookup_type, field_name):
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
return "DAYOFWEEK(%s)" % field_name
elif lookup_type == 'week':
# Override the value of default_week_format for consistency with
# other database backends.
# Mode 3: Monday, 1-53, with 4 or more days this year.
return "WEEK(%s, 3)" % field_name
else:
# EXTRACT returns 1-53 based on ISO-8601 for the week number.
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
fields = {
'year': '%%Y-01-01',
'month': '%%Y-%%m-01',
} # Use double percents to escape.
if lookup_type in fields:
format_str = fields[lookup_type]
return "CAST(DATE_FORMAT(%s, '%s') AS DATE)" % (field_name, format_str)
elif lookup_type == 'quarter':
return "MAKEDATE(YEAR(%s), 1) + INTERVAL QUARTER(%s) QUARTER - INTERVAL 1 QUARTER" % (
field_name, field_name
)
elif lookup_type == 'week':
return "DATE_SUB(%s, INTERVAL WEEKDAY(%s) DAY)" % (
field_name, field_name
)
else:
return "DATE(%s)" % (field_name)
def _convert_field_to_tz(self, field_name, tzname):
if settings.USE_TZ:
field_name = "CONVERT_TZ(%s, 'UTC', '%s')" % (field_name, tzname)
return field_name
def datetime_cast_date_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return "DATE(%s)" % field_name
def datetime_cast_time_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return "TIME(%s)" % field_name
def datetime_extract_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
return self.date_extract_sql(lookup_type, field_name)
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
if lookup_type == 'quarter':
return (
"CAST(DATE_FORMAT(MAKEDATE(YEAR({field_name}), 1) + "
"INTERVAL QUARTER({field_name}) QUARTER - " +
"INTERVAL 1 QUARTER, '%%Y-%%m-01 00:00:00') AS DATETIME)"
).format(field_name=field_name)
if lookup_type == 'week':
return (
"CAST(DATE_FORMAT(DATE_SUB({field_name}, "
"INTERVAL WEEKDAY({field_name}) DAY), "
"'%%Y-%%m-%%d 00:00:00') AS DATETIME)"
).format(field_name=field_name)
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
def time_trunc_sql(self, lookup_type, field_name):
fields = {
'hour': '%%H:00:00',
'minute': '%%H:%%i:00',
'second': '%%H:%%i:%%s',
} # Use double percents to escape.
if lookup_type in fields:
format_str = fields[lookup_type]
return "CAST(DATE_FORMAT(%s, '%s') AS TIME)" % (field_name, format_str)
else:
return "TIME(%s)" % (field_name)
def date_interval_sql(self, timedelta):
return 'INTERVAL %s MICROSECOND' % duration_microseconds(timedelta)
def format_for_duration_arithmetic(self, sql):
return 'INTERVAL %s MICROSECOND' % sql
def force_no_ordering(self):
"""
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
"""
return [(None, ("NULL", [], False))]
def last_executed_query(self, cursor, sql, params):
# With MySQLdb, cursor objects have an (undocumented) "_last_executed"
# attribute where the exact query sent to the database is saved.
# See MySQLdb/cursors.py in the source distribution.
return force_text(getattr(cursor, '_last_executed', None), errors='replace')
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
def random_function_sql(self):
return 'RAND()'
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# NB: The generated SQL below is specific to MySQL
# 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
# to clear all tables of all data
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;']
for table in tables:
sql.append('%s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
style.SQL_FIELD(self.quote_name(table)),
))
sql.append('SET FOREIGN_KEY_CHECKS = 1;')
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def validate_autopk_value(self, value):
# MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
if value == 0:
raise ValueError('The database backend does not accept 0 as a '
'value for AutoField.')
return value
def adapt_datetimefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# MySQL doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
return str(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# MySQL doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("MySQL backend does not support timezone-aware times.")
return str(value)
def max_name_length(self):
return 64
def bulk_insert_sql(self, fields, placeholder_rows):
placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
return "VALUES " + values_sql
def combine_expression(self, connector, sub_expressions):
if connector == '^':
return 'POW(%s)' % ','.join(sub_expressions)
# Convert the result to a signed integer since MySQL's binary operators
# return an unsigned integer.
elif connector in ('&', '|', '<<'):
return 'CONVERT(%s, SIGNED)' % connector.join(sub_expressions)
elif connector == '>>':
lhs, rhs = sub_expressions
return 'FLOOR(%(lhs)s / POW(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}
return super().combine_expression(connector, sub_expressions)
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type == 'TextField':
converters.append(self.convert_textfield_value)
elif internal_type in ['BooleanField', 'NullBooleanField']:
converters.append(self.convert_booleanfield_value)
elif internal_type == 'DateTimeField':
if settings.USE_TZ:
converters.append(self.convert_datetimefield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
return converters
def convert_textfield_value(self, value, expression, connection):
if value is not None:
value = force_text(value)
return value
def convert_booleanfield_value(self, value, expression, connection):
if value in (0, 1):
value = bool(value)
return value
def convert_datetimefield_value(self, value, expression, connection):
if value is not None:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_uuidfield_value(self, value, expression, connection):
if value is not None:
value = uuid.UUID(value)
return value
def binary_placeholder_sql(self, value):
return '_binary %s' if value is not None and not hasattr(value, 'as_sql') else '%s'
def convert_durationfield_value(self, value, expression, connection):
# DurationFields can return a Decimal in MariaDB.
if isinstance(value, decimal.Decimal):
value = float(value)
return super().convert_durationfield_value(value, expression, connection)
def subtract_temporals(self, internal_type, lhs, rhs):
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
if internal_type == 'TimeField':
if self.connection.mysql_is_mariadb:
# MariaDB includes the microsecond component in TIME_TO_SEC as
# a decimal. MySQL returns an integer without microseconds.
return '((TIME_TO_SEC(%(lhs)s) - TIME_TO_SEC(%(rhs)s)) * 1000000)' % {
'lhs': lhs_sql, 'rhs': rhs_sql
}, lhs_params + rhs_params
return (
"((TIME_TO_SEC(%(lhs)s) * 1000000 + MICROSECOND(%(lhs)s)) -"
" (TIME_TO_SEC(%(rhs)s) * 1000000 + MICROSECOND(%(rhs)s)))"
) % {'lhs': lhs_sql, 'rhs': rhs_sql}, lhs_params * 2 + rhs_params * 2
else:
return "TIMESTAMPDIFF(MICROSECOND, %s, %s)" % (rhs_sql, lhs_sql), rhs_params + lhs_params
def explain_query_prefix(self, format=None, **options):
# Alias MySQL's TRADITIONAL to TEXT for consistency with other backends.
if format and format.upper() == 'TEXT':
format = 'TRADITIONAL'
prefix = super().explain_query_prefix(format, **options)
if format:
prefix += ' FORMAT=%s' % format
if self.connection.features.needs_explain_extended and format is None:
# EXTENDED and FORMAT are mutually exclusive options.
prefix += ' EXTENDED'
return prefix
def regex_lookup(self, lookup_type):
# REGEXP BINARY doesn't work correctly in MySQL 8+ and REGEXP_LIKE
# doesn't exist in MySQL 5.6 or in MariaDB.
if self.connection.mysql_version < (8, 0, 0) or self.connection.mysql_is_mariadb:
if lookup_type == 'regex':
return '%s REGEXP BINARY %s'
return '%s REGEXP %s'
match_option = 'c' if lookup_type == 'regex' else 'i'
return "REGEXP_LIKE(%%s, %%s, '%s')" % match_option
def insert_statement(self, ignore_conflicts=False):
return 'INSERT IGNORE INTO' if ignore_conflicts else super().insert_statement(ignore_conflicts) | PypiClean |
/netket-3.9.2.tar.gz/netket-3.9.2/netket/utils/float.py | import numpy as np
from netket.utils.types import Array, Union
def comparable(
x: Array, *, bin_density: int = 3326400, offset: float = 5413 / 15629
) -> Array:
"""
Casts a floating point input to integer-indexed bins that are safe to compare
or hash.
Arguments:
x: the float array to be converted
bin_density: the inverse width of each bin. When binning rational numbers,
            it's best to use a `bin_density` that is a multiple of all expected denominators.
The default is :math:`3326400 = 2^6\times 3^3\times 5^2\times 7\times 11`.
offset: constant offset added to `bin_density * x` before rounding. To minimise
the chances of "interesting" numbers appearing on bin boundaries, it's
best to use a rational number with a large prime denominator.
The default is 5413/15629, both are primes.
Returns:
`x * bin_density + offset` rounded to an integer
Example:
>>> comparable([0.0, 0.3, 0.30000001, 1.3])
array([ 0, 997920, 997920, 4324320])
"""
return np.asarray(np.rint(np.asarray(x) * bin_density + offset), dtype=int)
def comparable_periodic(
x: Array,
where: Union[Array, bool] = True,
*,
bin_density: int = 3326400,
offset: float = 5413 / 15629,
) -> Array:
"""
Casts the fractional parts of floating point input to integer-indexed bins
that are safe to compare or hash.
Arguments:
x: the float array to be converted
where: specifies whether the fractional part (True) or the full value (False)
of the input is to be used. Must be broadcastable to `x.shape`.
If False, the output is the same as that of `comparable`.
bin_density: the inverse width of each bin. When binning rational numbers,
            it's best to use a `bin_density` that is a multiple of all expected denominators.
The default is :math:`3326400 = 2^6\times 3^3\times 5^2\times 7\times 11`.
offset: constant offset added to `bin_density * x` before rounding. To minimise
the chances of "interesting" numbers appearing on bin boundaries, it's
best to use a rational number with a large prime denominator.
The default is 5413/15629, both are primes.
Returns:
[`x` or frac(`x`)]` * bin_density + offset` rounded to an integer
Example:
>>> comparable_periodic([0.0, 0.3, 0.30000001, 1.3], where = [[True], [False]])
array([[ 0, 997920, 997920, 997920],
[ 0, 997920, 997920, 4324320]])
"""
bins = np.asarray(np.rint(np.asarray(x) * bin_density + offset), dtype=int)
return np.where(where, bins % bin_density, bins)
def _prune_zeros(x: Array, atol: float = 1e-08) -> Array:
# prunes nearly zero entries
x[np.isclose(x, 0.0, rtol=0.0, atol=atol)] = 0.0
return x
def prune_zeros(x: Array, atol: float = 1e-08) -> Array:
"""Prunes nearly zero real and imaginary parts"""
if np.iscomplexobj(x):
# Check if complex part is nonzero at all
if np.allclose(x.imag, 0.0, rtol=0.0, atol=atol):
return _prune_zeros(x.real)
else:
return _prune_zeros(x.real) + 1j * _prune_zeros(x.imag)
else:
return _prune_zeros(x)
def is_approx_int(x: Array, atol: float = 1e-08) -> Array:
"""
Returns `True` for all elements of the array that are within
`atol` to an integer.
"""
return np.isclose(x, np.rint(x), rtol=0.0, atol=atol) | PypiClean |
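# Example (illustrative): is_approx_int(np.array([1.0, 1.3, 2.0 + 1e-10])) returns
# array([ True, False,  True]).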
/HeartSounds-0.0.2-py3-none-any.whl/heartsounds/find_agreeing_points.py | import numpy as np
from enum import Enum
Agree_reduce_method = Enum("Agree_reduce_method", ("MEDIAN", "MAX"))
def find_agreeing_points(
agreement_time_threshold: float,
max_disagreers: int,
indices_by_detector: dict,
signal: np.ndarray,
sample_rate: float,
root_detector: str,
agree_reduce_method: Agree_reduce_method,
) -> np.ndarray:
    """Return event times (in seconds) on which the detectors agree.
    For every peak found by ``root_detector``, each detector's closest peak is collected;
    the event is kept when no more than ``max_disagreers`` detectors lie further than
    ``agreement_time_threshold`` from the root peak. Agreeing times are reduced to a
    single value per event: their median (``MEDIAN``) or the time of the largest signal
    sample between the earliest and latest agreeing time (``MAX``).
    """
signal_times = np.arange(len(signal)) / sample_rate
times_by_detector = {}
for detector in indices_by_detector:
indices = indices_by_detector[detector]
times_by_detector[detector] = signal_times[indices]
agreed_points = []
for peak_time in times_by_detector[root_detector]:
closest_by_detector = {}
for detector in times_by_detector:
times = times_by_detector[detector]
closest_by_detector[detector] = times[np.argmin(np.abs(times - peak_time))]
sorted_times = np.sort(list(closest_by_detector.values()))
mask = np.abs((sorted_times - peak_time)) < agreement_time_threshold
num_disagreers = np.sum(~mask)
if num_disagreers <= max_disagreers:
agreeing_times = sorted_times[mask]
if agree_reduce_method == Agree_reduce_method.MAX:
min_time = np.min(agreeing_times)
max_time = np.max(agreeing_times)
# find time of max peak in range
range_mask = (signal_times >= min_time) & (signal_times <= max_time)
time_range = signal_times[range_mask]
signal_range = signal[range_mask]
max_peak_time = time_range[np.argmax(signal_range)]
agreed_points.append(max_peak_time)
elif agree_reduce_method == Agree_reduce_method.MEDIAN:
agreeing_times = sorted_times[mask]
median = np.median(agreeing_times)
agreed_points.append(median)
else:
print(agree_reduce_method == Agree_reduce_method.MAX)
raise NotImplementedError(
f"unknown agree_reduce_method {agree_reduce_method}"
)
agreed_points = np.array(agreed_points)
return agreed_points | PypiClean |
/BeakerShowSessions-0.1.3.tar.gz/BeakerShowSessions-0.1.3/README.txt | BeakerShowSessions
==================
BeakerShowSessions is a Beaker_ extension that shows the active sessions
according to the given WSGI configuration file. Currently the plugin only works
with ext:database session storage backend.
You can find the Mercurial repository at bitbucket.org_
Installation
------------
easy_install::
    $ <env>/bin/easy_install BeakerShowSessions
pip::
    $ <env>/bin/pip install BeakerShowSessions
Upgrading from a previous version
---------------------------------
Note that in 0.1.3 the location of the session functions changed from
``beaker.scripts:get/show_sessions`` to
``beaker.scripts.sessions:get/show_sessions`` due to namespace issues.
Usage
-----
You can call get_sessions in order to get a list of active sessions (dicts)::
>>> from beaker.scripts.sessions import get_sessions
>>> get_sessions('cfg/production.ini')
BeakerShowSessions expects to find these keys in the `[app:main]` section of
your configuration file
- ``beaker.session.type = ext:database`` - the only supported backend (yet)
- ``beaker.session.url`` - an `SQLAlchemy engine URL`_
- ``beaker.session.timeout`` - session timeout in seconds
- ``beaker.session.table_name`` - (optional) session storage table. Defaults
to `beaker_cache`.
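For example, the relevant part of a configuration file could look like this (the
database URL and timeout are purely illustrative)::

    [app:main]
    beaker.session.type = ext:database
    beaker.session.url = sqlite:///%(here)s/sessions.db
    beaker.session.timeout = 3600
    beaker.session.table_name = beaker_cache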
If your beaker configuration directive prefix is not `beaker.session` (or you
have multiple beaker instances) you can provide the correct prefix as a second
option::
>>> get_sessions('cfg/prod.ini', 'bkr.sess')
If you are going to use BeakerShowSessions separately you could choose to call
`show_sessions` instead. It takes the same parameters but returns a pretty ASCII
table with results, like this::
>>> print show_sessions('cfg/prod.ini')
--------------------------------------------------------
_accessed_time | _creation_time | user_name
--------------------------------------------------------
2001-02-03 10:11:12 | 2001-02-03 10:11:12 | [email protected]
PasteCall_ provides a convenient method to call `show_sessions` from the
console::
$ paster call beaker.scripts.sessions:show_sessions 'cfg/prod.ini' 'bkr.ses'
--------------------------------------------------------
_accessed_time | _creation_time | user_name
--------------------------------------------------------
2001-02-03 10:11:12 | 2001-02-03 10:11:12 | [email protected]
.. _Beaker: http://beaker.groovie.org
.. _SQLAlchemy engine URL: http://www.sqlalchemy.org/docs/05/dbengine.html#create-engine-url-arguments
.. _PasteCall: http://pypi.python.org/pypi/PasteCall
.. _bitbucket.org: http://bitbucket.org/kaukas/beakershowsessions
| PypiClean |
/Chips-0.1.2.tar.gz/Chips-0.1.2/examples/example_1_hello_world.py | import sys
from chips import * #use the chips library
################################################################################
##make a simulation model that prints to stdout
def make_chip(string, output):
return Chip(
output(
Sequence(*tuple((ord(i) for i in string))+(0,)),
)
)
if "simulate" in sys.argv:
#write the chip to the code generator plugin
chip=make_chip("helo byd!\n", Console)
chip.test("Example 1: Hello World .... in welsh!", stop_cycles=100)
if "simulate_vhdl" in sys.argv:
from chips.VHDL_plugin import Plugin #import VHDL plugin
#simulate using an external vhdl simulator
chip=make_chip("helo byd!\n", Console)
vhdl_plugin = Plugin()
chip.write_code(vhdl_plugin)
vhdl_plugin.ghdl_test("Example 1 : Hello world .... in welsh!", stop_cycles=2000)
if "simulate_cpp" in sys.argv:
from chips.cpp_plugin import Plugin#import C++ plugin
#simulate using an external vhdl simulator
chip=make_chip("helo byd!\n", Console)
cpp_plugin = Plugin()
chip.write_code(cpp_plugin)
cpp_plugin.test("Example 1 : Hello world .... in welsh!", stop_cycles=2000)
if "visualize" in sys.argv:
from chips.visual_plugin import Plugin
#simulate using an external vhdl simulator
chip=make_chip("helo byd!\n", Console)
visual_plugin = Plugin("Example 1 : Hello world .... in welsh!")
chip.write_code(visual_plugin)
visual_plugin.draw("example_1.svg")
if "build" in sys.argv:
import os
import shutil
from chips.VHDL_plugin import Plugin #import VHDL plugin
#compile into a xilinx device
chip=make_chip("helo byd!\n", SerialOut)
vhdl_plugin = Plugin(internal_reset=False, internal_clock=False)
chip.write_code(vhdl_plugin)
from_file=os.path.join(".", "ucfs", "example_1.ucf")
to_file=os.path.join(".", "project", "xilinx", "project.ucf")
shutil.copy(from_file, to_file)
vhdl_plugin.xilinx_build("xc3s200-4-ft256")
if "test" in sys.argv:
#capture output from serial port
from serial import Serial
port = Serial("/dev/ttyUSB0", baudrate=115200, bytesize=8, parity="N", stopbits=1)
    response = port.readline()  # the first read may return a partial line; discard it
    response = port.readline()
print response
port.close() | PypiClean |
/FlexGet-3.9.6-py3-none-any.whl/flexget/components/trakt/db.py | import time
from datetime import datetime, timedelta
from typing import List
from dateutil.parser import parse as dateutil_parse
from loguru import logger
from sqlalchemy import Column, Date, DateTime, Integer, String, Table, Time, Unicode, and_, or_
from sqlalchemy.orm import relationship
from sqlalchemy.schema import ForeignKey
from flexget import db_schema, plugin
from flexget.manager import Session
from flexget.terminal import console
from flexget.utils import requests
from flexget.utils.database import json_synonym
from flexget.utils.tools import split_title_year
Base = db_schema.versioned_base('api_trakt', 7)
AuthBase = db_schema.versioned_base('trakt_auth', 0)
logger = logger.bind(name='api_trakt')
# Production Site
CLIENT_ID = '57e188bcb9750c79ed452e1674925bc6848bd126e02bb15350211be74c6547af'
CLIENT_SECRET = 'db4af7531e8df678b134dbc22445a2c04ebdbdd7213be7f5b6d17dfdfabfcdc2'
API_URL = 'https://api.trakt.tv/'
PIN_URL = 'https://trakt.tv/pin/346'
# Oauth account authentication
class TraktUserAuth(AuthBase):
__tablename__ = 'trakt_user_auth'
account = Column(Unicode, primary_key=True)
access_token = Column(Unicode)
refresh_token = Column(Unicode)
created = Column(DateTime)
expires = Column(DateTime)
def __init__(self, account, access_token, refresh_token, created, expires):
self.account = account
self.access_token = access_token
self.refresh_token = refresh_token
self.expires = token_expire_date(expires)
self.created = token_created_date(created)
def token_expire_date(expires):
return datetime.now() + timedelta(seconds=expires)
def token_created_date(created):
return datetime.fromtimestamp(created)
def device_auth():
data = {'client_id': CLIENT_ID}
try:
r = requests.post(get_api_url('oauth/device/code'), data=data).json()
device_code = r['device_code']
user_code = r['user_code']
expires_in = r['expires_in']
interval = r['interval']
console(
'Please visit {} and authorize Flexget. Your user code is {}. Your code expires in '
'{} minutes.'.format(r['verification_url'], user_code, expires_in / 60.0)
)
logger.debug('Polling for user authorization.')
data['code'] = device_code
data['client_secret'] = CLIENT_SECRET
end_time = time.time() + expires_in
console('Waiting...', end='')
# stop polling after expires_in seconds
while time.time() < end_time:
time.sleep(interval)
polling_request = requests.post(
get_api_url('oauth/device/token'), data=data, raise_status=False
)
if polling_request.status_code == 200: # success
return polling_request.json()
elif polling_request.status_code == 400: # pending -- waiting for user
console('...', end='')
elif polling_request.status_code == 404: # not found -- invalid device_code
raise plugin.PluginError('Invalid device code. Open an issue on Github.')
elif polling_request.status_code == 409: # already used -- user already approved
raise plugin.PluginError('User code has already been approved.')
elif polling_request.status_code == 410: # expired -- restart process
break
elif polling_request.status_code == 418: # denied -- user denied code
raise plugin.PluginError('User code has been denied.')
elif polling_request.status_code == 429: # polling too fast
logger.warning('Polling too quickly. Upping the interval. No action required.')
interval += 1
raise plugin.PluginError('User code has expired. Please try again.')
except requests.RequestException as e:
raise plugin.PluginError(f'Device authorization with Trakt.tv failed: {e}')
def token_oauth(data):
try:
return requests.post(get_api_url('oauth/token'), data=data).json()
except requests.RequestException as e:
raise plugin.PluginError(f'Token exchange with trakt failed: {e}')
def delete_account(account):
with Session() as session:
acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == account).first()
if not acc:
raise plugin.PluginError('Account %s not found.' % account)
session.delete(acc)
def get_access_token(account, token=None, refresh=False, re_auth=False, called_from_cli=False):
"""
Gets authorization info from a pin or refresh token.
:param account: Arbitrary account name to attach authorization to.
:param unicode token: The pin or refresh token, as supplied by the trakt website.
:param bool refresh: If True, refresh the access token using refresh_token from db.
:param bool re_auth: If True, account is re-authorized even if it already exists in db.
:raises RequestException: If there is a network error while authorizing.
"""
data = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob',
}
with Session() as session:
acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == account).first()
if acc and datetime.now() < acc.expires and not refresh and not re_auth:
return acc.access_token
else:
if (
acc
and (refresh or datetime.now() >= acc.expires - timedelta(days=5))
and not re_auth
):
logger.debug('Using refresh token to re-authorize account {}.', account)
data['refresh_token'] = acc.refresh_token
data['grant_type'] = 'refresh_token'
token_dict = token_oauth(data)
elif token:
# We are only in here if a pin was specified, so it's safe to use console instead of logging
console(
'Warning: PIN authorization has been deprecated. Use Device Authorization instead.'
)
data['code'] = token
data['grant_type'] = 'authorization_code'
token_dict = token_oauth(data)
elif called_from_cli:
logger.debug(
'No pin specified for an unknown account {}. Attempting to authorize device.',
account,
)
token_dict = device_auth()
else:
raise plugin.PluginError(
'Account %s has not been authorized. See `flexget trakt auth -h` on how to.'
% account
)
try:
new_acc = TraktUserAuth(
account,
token_dict['access_token'],
token_dict['refresh_token'],
token_dict.get('created_at', time.time()),
token_dict['expires_in'],
)
session.merge(new_acc)
return new_acc.access_token
except requests.RequestException as e:
raise plugin.PluginError(f'Token exchange with trakt failed: {e}')
def make_list_slug(name):
"""Return the slug for use in url for given list name."""
slug = name.lower()
# These characters are just stripped in the url
for char in '!@#$%^*()[]{}/=?+\\|':
slug = slug.replace(char, '')
# These characters get replaced
slug = slug.replace('&', 'and')
slug = slug.replace(' ', '-')
return slug
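# For example, make_list_slug('My Action & Sci-Fi Films!') returns
# 'my-action-and-sci-fi-films'.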
def get_session(account=None, token=None):
"""
Creates a requests session ready to talk to trakt API with FlexGet's api key.
Can also add user level authentication if `account` parameter is given.
:param account: An account authorized via `flexget trakt auth` CLI command. If given, returned session will be
authenticated for that account.
"""
# default to username if account name is not specified
session = requests.Session()
session.headers = {
'Content-Type': 'application/json',
'trakt-api-version': '2',
'trakt-api-key': CLIENT_ID,
}
if account:
access_token = get_access_token(account, token) if account else None
if access_token:
session.headers.update({'Authorization': 'Bearer %s' % access_token})
return session
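# Illustrative usage (the account name and endpoint are placeholders):
#     session = get_session(account='my_flexget_account')
#     watchlist = session.get(get_api_url('sync', 'watchlist', 'shows')).json()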
def get_api_url(*endpoint):
"""
Get the address of a trakt API endpoint.
:param endpoint: Can by a string endpoint (e.g. 'sync/watchlist') or an iterable (e.g. ('sync', 'watchlist')
Multiple parameters can also be specified instead of a single iterable.
:returns: The absolute url to the specified API endpoint.
"""
if len(endpoint) == 1 and not isinstance(endpoint[0], str):
endpoint = endpoint[0]
# Make sure integer portions are turned into strings first too
url = API_URL + '/'.join(map(str, endpoint))
return url
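# e.g. get_api_url('shows', 1390, 'seasons') and get_api_url(('shows', 1390, 'seasons'))
# both return 'https://api.trakt.tv/shows/1390/seasons' (1390 is an arbitrary example id).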
@db_schema.upgrade('api_trakt')
def upgrade(ver, session):
if ver is None or ver <= 6:
raise db_schema.UpgradeImpossible
return ver
def get_entry_ids(entry):
"""Creates a trakt ids dict from id fields on an entry. Prefers already populated info over lazy lookups."""
ids = {}
for lazy in [False, True]:
if entry.get('trakt_movie_id', eval_lazy=lazy):
ids['trakt'] = entry['trakt_movie_id']
elif entry.get('trakt_show_id', eval_lazy=lazy):
ids['trakt'] = entry['trakt_show_id']
elif entry.get('trakt_episode_id', eval_lazy=lazy):
ids['trakt'] = entry['trakt_episode_id']
if entry.get('tmdb_id', eval_lazy=lazy):
ids['tmdb'] = entry['tmdb_id']
if entry.get('tvdb_id', eval_lazy=lazy):
ids['tvdb'] = entry['tvdb_id']
if entry.get('imdb_id', eval_lazy=lazy):
ids['imdb'] = entry['imdb_id']
if entry.get('tvrage_id', eval_lazy=lazy):
ids['tvrage'] = entry['tvrage_id']
if ids:
break
return ids
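# Illustrative result: for an entry that already carries trakt_show_id=1390 and
# tvdb_id=121361, the returned dict is {'trakt': 1390, 'tvdb': 121361}; lazy fields are
# only evaluated on the second pass, when no ids were found eagerly.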
class TraktMovieTranslation(Base):
__tablename__ = 'trakt_movie_translations'
id = Column(Integer, primary_key=True, autoincrement=True)
language = Column(Unicode)
overview = Column(Unicode)
tagline = Column(Unicode)
title = Column(Unicode)
movie_id = Column(Integer, ForeignKey('trakt_movies.id'))
def __init__(self, translation, session):
super().__init__()
self.update(translation, session)
def update(self, translation, session):
for col in translation.keys():
setattr(self, col, translation.get(col))
class TraktShowTranslation(Base):
__tablename__ = 'trakt_show_translations'
id = Column(Integer, primary_key=True, autoincrement=True)
language = Column(Unicode)
overview = Column(Unicode)
title = Column(Unicode)
show_id = Column(Integer, ForeignKey('trakt_shows.id'))
def __init__(self, translation, session):
super().__init__()
self.update(translation, session)
def update(self, translation, session):
for col in translation.keys():
setattr(self, col, translation.get(col))
def get_translations(ident, style):
url = get_api_url(style + 's', ident, 'translations')
trakt_translation = TraktShowTranslation if style == 'show' else TraktMovieTranslation
trakt_translation_id = getattr(trakt_translation, style + '_id')
translations = []
req_session = get_session()
try:
results = req_session.get(url, params={'extended': 'full'}).json()
with Session() as session:
for result in results:
translation = (
session.query(trakt_translation)
.filter(
and_(
trakt_translation.language == result.get('language'),
trakt_translation_id == ident,
)
)
.first()
)
if not translation:
translation = trakt_translation(result, session)
translations.append(translation)
return translations
except requests.RequestException as e:
logger.debug('Error adding translations to trakt id {}: {}', ident, e)
class TraktGenre(Base):
__tablename__ = 'trakt_genres'
name = Column(Unicode, primary_key=True)
show_genres_table = Table(
'trakt_show_genres',
Base.metadata,
Column('show_id', Integer, ForeignKey('trakt_shows.id')),
Column('genre_id', Unicode, ForeignKey('trakt_genres.name')),
)
Base.register_table(show_genres_table)
movie_genres_table = Table(
'trakt_movie_genres',
Base.metadata,
Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
Column('genre_id', Unicode, ForeignKey('trakt_genres.name')),
)
Base.register_table(movie_genres_table)
class TraktActor(Base):
__tablename__ = 'trakt_actors'
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Unicode)
slug = Column(Unicode)
tmdb = Column(Integer)
imdb = Column(Unicode)
biography = Column(Unicode)
birthday = Column(Date)
death = Column(Date)
homepage = Column(Unicode)
def __init__(self, actor, session):
super().__init__()
self.update(actor, session)
def update(self, actor, session):
if self.id and self.id != actor.get('ids').get('trakt'):
raise Exception('Tried to update db actors with different actor data')
elif not self.id:
self.id = actor.get('ids').get('trakt')
self.name = actor.get('name')
ids = actor.get('ids')
self.imdb = ids.get('imdb')
self.slug = ids.get('slug')
self.tmdb = ids.get('tmdb')
self.biography = actor.get('biography')
if actor.get('birthday'):
self.birthday = dateutil_parse(actor.get('birthday'))
if actor.get('death'):
self.death = dateutil_parse(actor.get('death'))
self.homepage = actor.get('homepage')
def to_dict(self):
return {'name': self.name, 'trakt_id': self.id, 'imdb_id': self.imdb, 'tmdb_id': self.tmdb}
show_actors_table = Table(
'trakt_show_actors',
Base.metadata,
Column('show_id', Integer, ForeignKey('trakt_shows.id')),
Column('actors_id', Integer, ForeignKey('trakt_actors.id')),
)
Base.register_table(show_actors_table)
movie_actors_table = Table(
'trakt_movie_actors',
Base.metadata,
Column('movie_id', Integer, ForeignKey('trakt_movies.id')),
Column('actors_id', Integer, ForeignKey('trakt_actors.id')),
)
Base.register_table(movie_actors_table)
def get_db_actors(ident, style) -> List[TraktActor]:
actors = {}
url = get_api_url(f'{style}s', ident, 'people')
req_session = get_session()
try:
results = req_session.get(url, params={'extended': 'full'}).json()
with Session() as session:
for result in results.get('cast'):
trakt_id = result.get('person').get('ids').get('trakt')
# sometimes an actor can occur twice in the list by mistake. This check is to avoid this unlikely event
if trakt_id in actors:
continue
actor = session.query(TraktActor).filter(TraktActor.id == trakt_id).first()
if not actor:
actor = TraktActor(result.get('person'), session)
actors[trakt_id] = actor
except requests.RequestException as e:
        logger.debug('Error searching for actors for trakt id {}: {}', ident, e)
return list(actors.values())
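# Usage sketch (the id 1390 is illustrative): get_db_actors(1390, 'show') requests
# 'shows/1390/people', deduplicates cast members by their trakt id and returns a list of
# TraktActor rows; on a request error it returns whatever was collected before the failure.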
def get_translations_dict(translate, style):
res = {}
for lang in translate:
info = {'overview': lang.overview, 'title': lang.title}
if style == 'movie':
info['tagline'] = lang.tagline
res[lang.language] = info
return res
def list_actors(actors):
res = {}
for actor in actors:
info = {
'trakt_id': actor.id,
'name': actor.name,
'imdb_id': str(actor.imdb),
'trakt_slug': actor.slug,
'tmdb_id': str(actor.tmdb),
'birthday': actor.birthday.strftime("%Y/%m/%d") if actor.birthday else None,
'biography': actor.biography,
'homepage': actor.homepage,
'death': actor.death.strftime("%Y/%m/%d") if actor.death else None,
}
res[str(actor.id)] = info
return res
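# Shape sketch (actor values are made up): list_actors([actor]) returns a dict keyed by
# the actor's trakt id as a string, e.g.
# {'12345': {'trakt_id': 12345, 'name': 'Jane Doe', 'imdb_id': 'nm0000001',
#            'trakt_slug': 'jane-doe', 'tmdb_id': '500', 'birthday': '1970/01/01',
#            'biography': '...', 'homepage': None, 'death': None}}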
class TraktEpisode(Base):
__tablename__ = 'trakt_episodes'
id = Column(Integer, primary_key=True, autoincrement=False)
tvdb_id = Column(Integer)
imdb_id = Column(Unicode)
tmdb_id = Column(Integer)
tvrage_id = Column(Unicode)
title = Column(Unicode)
season = Column(Integer)
number = Column(Integer)
number_abs = Column(Integer)
overview = Column(Unicode)
first_aired = Column(DateTime)
updated_at = Column(DateTime)
cached_at = Column(DateTime)
series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=False)
def __init__(self, trakt_episode, session):
super().__init__()
self.update(trakt_episode, session)
def update(self, trakt_episode, session):
"""Updates this record from the trakt media object `trakt_episode` returned by the trakt api."""
if self.id and self.id != trakt_episode['ids']['trakt']:
raise Exception('Tried to update db ep with different ep data')
elif not self.id:
self.id = trakt_episode['ids']['trakt']
self.imdb_id = trakt_episode['ids']['imdb']
self.tmdb_id = trakt_episode['ids']['tmdb']
self.tvrage_id = trakt_episode['ids']['tvrage']
self.tvdb_id = trakt_episode['ids']['tvdb']
self.first_aired = None
if trakt_episode.get('first_aired'):
self.first_aired = dateutil_parse(trakt_episode['first_aired'], ignoretz=True)
self.updated_at = dateutil_parse(trakt_episode.get('updated_at'), ignoretz=True)
self.cached_at = datetime.now()
for col in ['title', 'season', 'number', 'number_abs', 'overview']:
setattr(self, col, trakt_episode.get(col))
@property
def expired(self):
# TODO should episode have its own expiration function?
return False
class TraktSeason(Base):
__tablename__ = 'trakt_seasons'
id = Column(Integer, primary_key=True, autoincrement=False)
tvdb_id = Column(Integer)
tmdb_id = Column(Integer)
tvrage_id = Column(Unicode)
title = Column(Unicode)
number = Column(Integer)
episode_count = Column(Integer)
aired_episodes = Column(Integer)
overview = Column(Unicode)
first_aired = Column(DateTime)
ratings = Column(Integer)
votes = Column(Integer)
cached_at = Column(DateTime)
series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=False)
def __init__(self, trakt_season, session):
super().__init__()
self.update(trakt_season, session)
def update(self, trakt_season, session):
"""Updates this record from the trakt media object `trakt_episode` returned by the trakt api."""
if self.id and self.id != trakt_season['ids']['trakt']:
raise Exception('Tried to update db season with different season data')
elif not self.id:
self.id = trakt_season['ids']['trakt']
self.tmdb_id = trakt_season['ids']['tmdb']
self.tvrage_id = trakt_season['ids']['tvrage']
self.tvdb_id = trakt_season['ids']['tvdb']
self.first_aired = None
if trakt_season.get('first_aired'):
self.first_aired = dateutil_parse(trakt_season['first_aired'], ignoretz=True)
self.cached_at = datetime.now()
for col in [
'title',
'number',
'episode_count',
'aired_episodes',
'ratings',
'votes',
'overview',
]:
setattr(self, col, trakt_season.get(col))
@property
def expired(self):
# TODO should season have its own expiration function?
return False
class TraktShow(Base):
__tablename__ = 'trakt_shows'
id = Column(Integer, primary_key=True, autoincrement=False)
title = Column(Unicode)
year = Column(Integer)
slug = Column(Unicode)
tvdb_id = Column(Integer)
imdb_id = Column(Unicode)
tmdb_id = Column(Integer)
tvrage_id = Column(Unicode)
overview = Column(Unicode)
first_aired = Column(DateTime)
air_day = Column(Unicode)
air_time = Column(Time)
timezone = Column(Unicode)
runtime = Column(Integer)
certification = Column(Unicode)
network = Column(Unicode)
country = Column(Unicode)
status = Column(String)
rating = Column(Integer)
votes = Column(Integer)
language = Column(Unicode)
homepage = Column(Unicode)
trailer = Column(Unicode)
aired_episodes = Column(Integer)
_translations = relationship(TraktShowTranslation)
_translation_languages = Column('translation_languages', Unicode)
translation_languages = json_synonym('_translation_languages')
episodes = relationship(
TraktEpisode, backref='show', cascade='all, delete, delete-orphan', lazy='dynamic'
)
seasons = relationship(
TraktSeason, backref='show', cascade='all, delete, delete-orphan', lazy='dynamic'
)
genres = relationship(TraktGenre, secondary=show_genres_table)
_actors = relationship(TraktActor, secondary=show_actors_table)
updated_at = Column(DateTime)
cached_at = Column(DateTime)
def to_dict(self):
return {
"id": self.id,
"title": self.title,
"year": self.year,
"slug": self.slug,
"tvdb_id": self.tvdb_id,
"imdb_id": self.imdb_id,
"tmdb_id": self.tmdb_id,
"tvrage_id": self.tvrage_id,
"overview": self.overview,
"first_aired": self.first_aired,
"air_day": self.air_day,
"air_time": self.air_time.strftime("%H:%M") if self.air_time else None,
"timezone": self.timezone,
"runtime": self.runtime,
"certification": self.certification,
"network": self.network,
"country": self.country,
"status": self.status,
"rating": self.rating,
"votes": self.votes,
"language": self.language,
"homepage": self.homepage,
"number_of_aired_episodes": self.aired_episodes,
"genres": [g.name for g in self.genres],
"updated_at": self.updated_at,
"cached_at": self.cached_at,
}
def __init__(self, trakt_show, session):
super().__init__()
self.update(trakt_show, session)
def update(self, trakt_show, session):
"""Updates this record from the trakt media object `trakt_show` returned by the trakt api."""
if self.id and self.id != trakt_show['ids']['trakt']:
raise Exception('Tried to update db show with different show data')
elif not self.id:
self.id = trakt_show['ids']['trakt']
self.slug = trakt_show['ids']['slug']
self.imdb_id = trakt_show['ids']['imdb']
self.tmdb_id = trakt_show['ids']['tmdb']
self.tvrage_id = trakt_show['ids']['tvrage']
self.tvdb_id = trakt_show['ids']['tvdb']
if trakt_show.get('airs'):
airs = trakt_show.get('airs')
self.air_day = airs.get('day')
self.timezone = airs.get('timezone')
if airs.get('time'):
# Time might be HH:MM, or HH:MM:SS #2783
self.air_time = dateutil_parse(airs['time'], ignoretz=True).time()
else:
self.air_time = None
if trakt_show.get('first_aired'):
self.first_aired = dateutil_parse(trakt_show.get('first_aired'), ignoretz=True)
else:
self.first_aired = None
self.updated_at = dateutil_parse(trakt_show.get('updated_at'), ignoretz=True)
for col in [
'overview',
'runtime',
'rating',
'votes',
'language',
'title',
'year',
'certification',
'network',
'country',
'status',
'aired_episodes',
'trailer',
'homepage',
]:
setattr(self, col, trakt_show.get(col))
# Sometimes genres and translations are None but we really do want a list, hence the "or []"
self.genres = [
TraktGenre(name=g.replace(' ', '-')) for g in trakt_show.get('genres') or []
]
self.cached_at = datetime.now()
self.translation_languages = trakt_show.get('available_translations') or []
def get_episode(self, season, number, session, only_cached=False):
# TODO: Does series data being expired mean all episode data should be refreshed?
episode = (
self.episodes.filter(TraktEpisode.season == season)
.filter(TraktEpisode.number == number)
.first()
)
if not episode or self.expired:
url = get_api_url(
'shows', self.id, 'seasons', season, 'episodes', number, '?extended=full'
)
if only_cached:
raise LookupError(f'Episode {season} {number} not found in cache')
logger.debug(
'Episode {} {} not found in cache, looking up from trakt.', season, number
)
try:
data = get_session().get(url).json()
except requests.RequestException:
raise LookupError('Error Retrieving Trakt url: %s' % url)
if not data:
raise LookupError('No data in response from trakt %s' % url)
episode = self.episodes.filter(TraktEpisode.id == data['ids']['trakt']).first()
if episode:
episode.update(data, session)
else:
episode = TraktEpisode(data, session)
self.episodes.append(episode)
session.commit()
return episode
def get_season(self, number, session, only_cached=False):
# TODO: Does series data being expired mean all season data should be refreshed?
season = self.seasons.filter(TraktSeason.number == number).first()
if not season or self.expired:
url = get_api_url('shows', self.id, 'seasons', '?extended=full')
if only_cached:
raise LookupError('Season %s not found in cache' % number)
logger.debug('Season {} not found in cache, looking up from trakt.', number)
try:
ses = get_session()
data = ses.get(url).json()
except requests.RequestException:
raise LookupError('Error Retrieving Trakt url: %s' % url)
if not data:
raise LookupError('No data in response from trakt %s' % url)
# We fetch all seasons for the given show because we barely get any data otherwise
for season_result in data:
db_season = self.seasons.filter(
TraktSeason.id == season_result['ids']['trakt']
).first()
if db_season:
db_season.update(season_result, session)
else:
db_season = TraktSeason(season_result, session)
self.seasons.append(db_season)
if number == season_result['number']:
season = db_season
if not season:
raise LookupError(f'Season {number} not found for show {self.title}')
session.commit()
return season
@property
def expired(self):
"""
        :return: True if show details are considered to be expired, i.e. in need of an update
"""
# TODO stolen from imdb plugin, maybe there's a better way?
if self.cached_at is None:
logger.debug('cached_at is None: {}', self)
return True
refresh_interval = 2
# if show has been cancelled or ended, then it is unlikely to be updated often
if self.year and (self.status == 'ended' or self.status == 'canceled'):
# Make sure age is not negative
age = max((datetime.now().year - self.year), 0)
refresh_interval += age * 5
logger.debug('show `{}` age {} expires in {} days', self.title, age, refresh_interval)
return self.cached_at < datetime.now() - timedelta(days=refresh_interval)
@property
def translations(self):
if not self._translations:
self._translations = get_translations(self.id, 'show')
return self._translations
@property
def actors(self):
if not self._actors:
self._actors[:] = get_db_actors(self.id, 'show')
return self._actors
def __repr__(self):
return f'<name={self.title}, id={self.id}>'
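# Usage sketch (season/episode numbers are illustrative): given a TraktShow row bound to
# an open database session, episodes and seasons are looked up from the local cache first
# and fetched from trakt only when missing or expired:
#   episode = show.get_episode(season=2, number=5, session=session)
#   season = show.get_season(number=2, session=session)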
class TraktMovie(Base):
__tablename__ = 'trakt_movies'
id = Column(Integer, primary_key=True, autoincrement=False)
title = Column(Unicode)
year = Column(Integer)
slug = Column(Unicode)
imdb_id = Column(Unicode)
tmdb_id = Column(Integer)
tagline = Column(Unicode)
overview = Column(Unicode)
released = Column(Date)
runtime = Column(Integer)
rating = Column(Integer)
votes = Column(Integer)
trailer = Column(Unicode)
homepage = Column(Unicode)
language = Column(Unicode)
updated_at = Column(DateTime)
cached_at = Column(DateTime)
_translations = relationship(TraktMovieTranslation, backref='movie')
_translation_languages = Column('translation_languages', Unicode)
translation_languages = json_synonym('_translation_languages')
genres = relationship(TraktGenre, secondary=movie_genres_table)
_actors = relationship(TraktActor, secondary=movie_actors_table)
def __init__(self, trakt_movie, session):
super().__init__()
self.update(trakt_movie, session)
def to_dict(self):
return {
"id": self.id,
"title": self.title,
"year": self.year,
"slug": self.slug,
"imdb_id": self.imdb_id,
"tmdb_id": self.tmdb_id,
"tagline": self.tagline,
"overview": self.overview,
"released": self.released,
"runtime": self.runtime,
"rating": self.rating,
"votes": self.votes,
"language": self.language,
"homepage": self.homepage,
"trailer": self.trailer,
"genres": [g.name for g in self.genres],
"updated_at": self.updated_at,
"cached_at": self.cached_at,
}
def update(self, trakt_movie, session):
"""Updates this record from the trakt media object `trakt_movie` returned by the trakt api."""
if self.id and self.id != trakt_movie['ids']['trakt']:
raise Exception('Tried to update db movie with different movie data')
elif not self.id:
self.id = trakt_movie['ids']['trakt']
self.slug = trakt_movie['ids']['slug']
self.imdb_id = trakt_movie['ids']['imdb']
self.tmdb_id = trakt_movie['ids']['tmdb']
for col in [
'title',
'overview',
'runtime',
'rating',
'votes',
'language',
'tagline',
'year',
'trailer',
'homepage',
]:
setattr(self, col, trakt_movie.get(col))
if trakt_movie.get('released'):
self.released = dateutil_parse(trakt_movie.get('released'), ignoretz=True).date()
self.updated_at = dateutil_parse(trakt_movie.get('updated_at'), ignoretz=True)
self.genres = [TraktGenre(name=g.replace(' ', '-')) for g in trakt_movie.get('genres', [])]
self.cached_at = datetime.now()
self.translation_languages = trakt_movie.get('available_translations', [])
@property
def expired(self):
"""
        :return: True if movie details are considered to be expired, i.e. in need of an update
"""
# TODO stolen from imdb plugin, maybe there's a better way?
if self.updated_at is None:
logger.debug('updated_at is None: {}', self)
return True
refresh_interval = 2
if self.year:
# Make sure age is not negative
age = max((datetime.now().year - self.year), 0)
refresh_interval += age * 5
logger.debug('movie `{}` age {} expires in {} days', self.title, age, refresh_interval)
return self.cached_at < datetime.now() - timedelta(days=refresh_interval)
@property
def translations(self):
if not self._translations:
self._translations = get_translations(self.id, 'movie')
return self._translations
@property
def actors(self):
if not self._actors:
self._actors[:] = get_db_actors(self.id, 'movie')
return self._actors
class TraktShowSearchResult(Base):
__tablename__ = 'trakt_show_search_results'
id = Column(Integer, primary_key=True)
search = Column(Unicode, unique=True, nullable=False)
series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=True)
series = relationship(TraktShow, backref='search_strings')
def __init__(self, search, series_id=None, series=None):
self.search = search.lower()
if series_id:
self.series_id = series_id
if series:
self.series = series
class TraktMovieSearchResult(Base):
__tablename__ = 'trakt_movie_search_results'
id = Column(Integer, primary_key=True)
search = Column(Unicode, unique=True, nullable=False)
movie_id = Column(Integer, ForeignKey('trakt_movies.id'), nullable=True)
movie = relationship(TraktMovie, backref='search_strings')
def __init__(self, search, movie_id=None, movie=None):
self.search = search.lower()
if movie_id:
self.movie_id = movie_id
if movie:
self.movie = movie
class TraktMovieIds:
"""Simple class that holds a variety of possible IDs that Trakt utilize in their API, eg. imdb id, trakt id"""
def __init__(self, trakt_id=None, trakt_slug=None, tmdb_id=None, imdb_id=None, **kwargs):
self.trakt_id = trakt_id
self.trakt_slug = trakt_slug
self.tmdb_id = tmdb_id
self.imdb_id = imdb_id
def get_trakt_id(self):
return self.trakt_id or self.trakt_slug
def to_dict(self):
"""Returns a dict containing id fields that are relevant for a movie"""
return {
'id': self.trakt_id,
'slug': self.trakt_slug,
'tmdb_id': self.tmdb_id,
'imdb_id': self.imdb_id,
}
def __bool__(self):
return any([self.trakt_id, self.trakt_slug, self.tmdb_id, self.imdb_id])
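# Usage sketch for TraktMovieIds (the imdb/tmdb values are illustrative):
def _example_trakt_movie_ids():
    """Hedged usage sketch: building a TraktMovieIds from whatever ids are at hand.
    Extra keyword arguments are simply ignored by the constructor."""
    ids = TraktMovieIds(imdb_id='tt0137523', tmdb_id=550)
    assert bool(ids)                   # truthy because at least one id is set
    assert ids.get_trakt_id() is None  # no trakt id or slug was supplied
    # to_dict() -> {'id': None, 'slug': None, 'tmdb_id': 550, 'imdb_id': 'tt0137523'}
    return ids.to_dict()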
class TraktShowIds:
"""Simple class that holds a variety of possible IDs that Trakt utilize in their API, eg. imdb id, trakt id"""
def __init__(
self,
trakt_id=None,
trakt_slug=None,
tmdb_id=None,
imdb_id=None,
tvdb_id=None,
tvrage_id=None,
**kwargs,
):
self.trakt_id = trakt_id
self.trakt_slug = trakt_slug
self.tmdb_id = tmdb_id
self.imdb_id = imdb_id
self.tvdb_id = tvdb_id
self.tvrage_id = tvrage_id
def get_trakt_id(self):
return self.trakt_id or self.trakt_slug
def to_dict(self):
"""Returns a dict containing id fields that are relevant for a show/season/episode"""
return {
'id': self.trakt_id,
'slug': self.trakt_slug,
'tmdb_id': self.tmdb_id,
'imdb_id': self.imdb_id,
'tvdb_id': self.tvdb_id,
'tvrage_id': self.tvrage_id,
}
def __bool__(self):
return any(
[
self.trakt_id,
self.trakt_slug,
self.tmdb_id,
self.imdb_id,
self.tvdb_id,
self.tvrage_id,
]
)
def get_item_from_cache(table, session, title=None, year=None, trakt_ids=None):
"""
Get the cached info for a given show/movie from the database.
:param table: Either TraktMovie or TraktShow
:param title: Title of the show/movie
:param year: First release year
:param trakt_ids: instance of TraktShowIds or TraktMovieIds
:param session: database session object
:return: query result
"""
result = None
if trakt_ids:
result = (
session.query(table)
.filter(
or_(getattr(table, col) == val for col, val in trakt_ids.to_dict().items() if val)
)
.first()
)
elif title:
if not year:
title, year = split_title_year(title)
query = session.query(table).filter(table.title == title)
if year:
query = query.filter(table.year == year)
result = query.first()
return result
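# Usage sketch (ids and title are illustrative): inside an open database session a cached
# record can be fetched either by known ids or by title (the year may be embedded in it):
#   with Session() as session:
#       by_ids = get_item_from_cache(TraktMovie, session, trakt_ids=TraktMovieIds(imdb_id='tt0137523'))
#       by_title = get_item_from_cache(TraktMovie, session, title='Fight Club (1999)')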
def get_trakt_id_from_id(trakt_ids, media_type):
if not trakt_ids:
raise LookupError('No lookup arguments provided.')
requests_session = get_session()
for id_type, identifier in trakt_ids.to_dict().items():
if not identifier:
continue
        # remove the '_id' suffix for the api call; note that str.rstrip strips a
        # character set rather than a suffix, so slice the suffix off explicitly
        stripped_id_type = id_type[:-3] if id_type.endswith('_id') else id_type
try:
logger.debug('Searching with params: {}={}', stripped_id_type, identifier)
results = requests_session.get(
get_api_url('search'), params={'id_type': stripped_id_type, 'id': identifier}
).json()
except requests.RequestException as e:
raise LookupError(
f'Searching trakt for {stripped_id_type}={identifier} failed with error: {e}'
)
for result in results:
if result['type'] != media_type:
continue
return result[media_type]['ids']['trakt']
def get_trakt_id_from_title(title, media_type, year=None):
if not title:
raise LookupError('No lookup arguments provided.')
requests_session = get_session()
# Try finding trakt id based on title and year
parsed_title, y = split_title_year(title)
y = year or y
try:
params = {'query': parsed_title, 'type': media_type, 'year': y}
logger.debug('Type of title: {}', type(parsed_title))
logger.debug('Searching with params: {}', ', '.join(f'{k}={v}' for k, v in params.items()))
results = requests_session.get(get_api_url('search'), params=params).json()
except requests.RequestException as e:
raise LookupError(f'Searching trakt for {title} failed with error: {e}')
for result in results:
if year and result[media_type]['year'] != year:
continue
if parsed_title.lower() == result[media_type]['title'].lower():
return result[media_type]['ids']['trakt']
# grab the first result if there is no exact match
if results:
return results[0][media_type]['ids']['trakt']
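# Usage sketch (the title is illustrative): get_trakt_id_from_title('Fight Club (1999)', 'movie')
# searches trakt with the parsed title and year, prefers a case-insensitive exact title match
# (skipping other years when an explicit year argument is given), otherwise falls back to the
# first search result, and returns the numeric trakt id, or None when the search is empty.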
def get_trakt_data(media_type, title=None, year=None, trakt_ids=None):
trakt_id = None
if trakt_ids:
trakt_id = trakt_ids.get_trakt_id()
if not trakt_id and trakt_ids:
trakt_id = get_trakt_id_from_id(trakt_ids, media_type)
if not trakt_id and title:
trakt_id = get_trakt_id_from_title(title, media_type, year=year)
if not trakt_id:
raise LookupError(
            f'No results on Trakt.tv, title={title}, ids={trakt_ids.to_dict() if trakt_ids else None}.'
)
# Get actual data from trakt
try:
return (
get_session()
.get(get_api_url(media_type + 's', trakt_id), params={'extended': 'full'})
.json()
)
except requests.RequestException as e:
raise LookupError(f'Error getting trakt data for id {trakt_id}: {e}')
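# Usage sketch (the title is illustrative; network access is required):
def _example_get_trakt_data():
    """Hedged usage sketch: the lookup order is trakt id/slug, then other known ids,
    then a title search; the return value is the full trakt JSON for the item."""
    data = get_trakt_data('movie', title='Fight Club', year=1999)
    return data['ids']['trakt']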
def get_user_data(data_type, media_type, session, username):
"""
    Fetches user data from Trakt.tv from the /users/<username>/<data_type>/<media_type> endpoint.
    E.g. a user's movie collection is fetched from /users/<username>/collection/movies.
    :param data_type: Name of the data type, e.g. collection, watched etc.
    :param media_type: Type of media we want <data_type> for, e.g. shows, episodes, movies.
    :param session: A trakt requests session with a valid token
    :param username: Username of the user to fetch data for
    :return: A list of records with episode/season/movie (and, where relevant, show) fields flattened into each item
"""
endpoint = f'{data_type}/{media_type}'
try:
data = session.get(get_api_url('users', username, data_type, media_type)).json()
if not data:
logger.warning('No {} data returned from trakt endpoint {}.', data_type, endpoint)
return []
logger.verbose(
'Received {} records from trakt.tv for user {} from endpoint {}',
len(data),
username,
endpoint,
)
# extract show, episode and movie information
for item in data:
episode = item.pop('episode', {})
season = item.pop('season', {})
show = item.pop('show', {})
movie = item.pop('movie', {})
item.update(episode)
item.update(season)
item.update(movie)
# show is irrelevant if either episode or season is present
if not episode and not season:
item.update(show)
return data
except requests.RequestException as e:
raise plugin.PluginError(
f'Error fetching data from trakt.tv endpoint {endpoint} for user {username}: {e}'
)
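# Usage sketch (the username is illustrative; the session must carry a valid trakt token):
#   watched_shows = get_user_data('watched', 'shows', get_session(), 'some_username')
# Each returned record is flattened so episode/season/movie fields sit at the top level;
# show fields are merged in only when no episode or season is present.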
def get_username(username=None, account=None):
"""Returns 'me' if account is provided and username is not"""
if not username and account:
return 'me'
    return username
# sstools/saffronstays.py
import requests
import json
import pandas as pd
from sendgrid.helpers.mail import Mail
from sendgrid import SendGridAPIClient
from .common import *
# SQL query on the SS database (ONLY SELECT) - returns a dataframe
def sql_query(query,cypher_key):
myConnection = db_connection(cypher_key,database="main")
if query.split(' ')[0] != 'SELECT':
print("Error. Please only use non destructive (SELECT) queries.")
return "Please only use non destructive (SELECT) queries."
response_df = pd.io.sql.read_sql(query, con=myConnection)
myConnection.close()
return response_df
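# Usage sketch (the table name and key are placeholders): only SELECT statements are
# accepted, and the result comes back as a pandas DataFrame:
#   df = sql_query("SELECT * FROM bookings LIMIT 10", cypher_key="<your cypher key>")
#   print(df.head())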
# to execute destructive queries
def sql_query_destructive(query,cypher_key):
con = db_connection(cypher_key,database="main")
try:
with con.cursor() as cur:
cur.execute(query)
con.commit()
finally:
con.close()
class dev:
def sql_query(query,cypher_key):
myConnection = db_connection(cypher_key,database="dev")
response_df = pd.io.sql.read_sql(query, con=myConnection)
myConnection.close()
return response_df
def sql_query_destructive(query,cypher_key):
con = db_connection(cypher_key,database="dev")
try:
with con.cursor() as cur:
cur.execute(query)
con.commit()
finally:
con.close()
class aws:
def sql_query(query,cypher_key):
myConnection = db_connection(cypher_key,database="main")
if query.split(' ')[0] != 'SELECT':
print("Error. Please only use non destructive (SELECT) queries.")
return "Please only use non destructive (SELECT) queries."
response_df = pd.io.sql.read_sql(query, con=myConnection)
myConnection.close()
return response_df
# to execute destructive queries
def sql_query_destructive(query,cypher_key):
con = db_connection(cypher_key,database="main")
try:
with con.cursor() as cur:
cur.execute(query)
con.commit()
finally:
con.close()
# Get the status for all the dates for a list of homes
def get_calendar(listing_ids,check_in,check_out):
parsed_listing_ids = str(listing_ids)[1:-1]
parsed_listing_ids = parsed_listing_ids.replace("'","").replace(" ","")
url = "https://www.saffronstays.com/calender_node.php"
params={
"listingList": parsed_listing_ids,
"checkIn":check_in,
"checkOut":check_out
}
payload = {}
headers= {}
response = requests.get(url, headers=headers, data = payload,params=params)
response = json.loads(response.text.encode('utf8'))
return response
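# Usage sketch (listing ids and dates are placeholders): availability for several homes
# between two dates is returned as the parsed JSON payload from saffronstays.com:
#   calendar = get_calendar([1001, 1002], check_in="2021-01-01", check_out="2021-01-07")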
# SS Facebook catalogue (a list of currently live listings)
def ss_fb_catalogue():
url = "https://www.saffronstays.com/items_catalogue.php"
response = requests.get(url)
response_data = response.text.encode('utf8')
csv_endpoint = str(response_data).split('`')[1]
csv_download_url = "https://www.saffronstays.com/"+csv_endpoint
ss_data = pd.read_csv(csv_download_url)
return ss_data
# list of emails and preheader names, update with yours
def sendgrid_email(TEMPLATE_ID,EMAILS,api_key,PAYLOAD={},from_email='[email protected]',from_name='SaffronStays'):
""" Send a dynamic email to a list of email addresses
  :returns: the HTTP status code returned by the SendGrid API, as a string
  :note: errors raised by the SendGrid client are caught and returned as an error string """
# create Mail object and populate
message = Mail(
from_email=(from_email,from_name),
to_emails=EMAILS
)
# pass custom values for our HTML placeholders
message.dynamic_template_data = PAYLOAD
message.template_id = TEMPLATE_ID
# create our sendgrid client object, pass it our key, then send and return our response objects
try:
sg = SendGridAPIClient(api_key)
response = sg.send(message)
code, body, headers = response.status_code, response.body, response.headers
print(f"Response code: {code}")
print(f"Response headers: {headers}")
print(f"Response body: {body}")
print("Dynamic Messages Sent!")
return str(response.status_code)
except Exception as e:
print("Error: {0}".format(e))
return "Error: {0}".format(e) | PypiClean |
// kallithea/public/js/mergely.js
"use strict";
(function( window, document, jQuery, CodeMirror ){
var Mgly = {};
Mgly.Timer = function(){
var self = this;
self.start = function() { self.t0 = new Date().getTime(); };
self.stop = function() {
var t1 = new Date().getTime();
var d = t1 - self.t0;
self.t0 = t1;
return d;
};
self.start();
};
Mgly.ChangeExpression = new RegExp(/(^(?![><\-])*\d+(?:,\d+)?)([acd])(\d+(?:,\d+)?)/);
Mgly.DiffParser = function(diff) {
var changes = [];
var change_id = 0;
// parse diff
var diff_lines = diff.split(/\n/);
for (var i = 0; i < diff_lines.length; ++i) {
if (diff_lines[i].length == 0) continue;
var change = {};
var test = Mgly.ChangeExpression.exec(diff_lines[i]);
if (test == null) continue;
// lines are zero-based
var fr = test[1].split(',');
change['lhs-line-from'] = fr[0] - 1;
if (fr.length == 1) change['lhs-line-to'] = fr[0] - 1;
else change['lhs-line-to'] = fr[1] - 1;
var to = test[3].split(',');
change['rhs-line-from'] = to[0] - 1;
if (to.length == 1) change['rhs-line-to'] = to[0] - 1;
else change['rhs-line-to'] = to[1] - 1;
change['op'] = test[2];
changes[change_id++] = change;
}
return changes;
};
Mgly.sizeOf = function(obj) {
var size = 0, key;
for (key in obj) {
if (obj.hasOwnProperty(key)) size++;
}
return size;
};
Mgly.LCS = function(x, y) {
this.x = x.replace(/[ ]{1}/g, '\n');
this.y = y.replace(/[ ]{1}/g, '\n');
};
jQuery.extend(Mgly.LCS.prototype, {
clear: function() { this.ready = 0; },
diff: function(added, removed) {
var d = new Mgly.diff(this.x, this.y, {ignorews: false});
var changes = Mgly.DiffParser(d.normal_form());
var li = 0, lj = 0;
for (var i = 0; i < changes.length; ++i) {
var change = changes[i];
if (change.op != 'a') {
// find the starting index of the line
li = d.getLines('lhs').slice(0, change['lhs-line-from']).join(' ').length;
				// get the index of the span of the change
lj = change['lhs-line-to'] + 1;
// get the changed text
var lchange = d.getLines('lhs').slice(change['lhs-line-from'], lj).join(' ');
if (change.op == 'd') lchange += ' ';// include the leading space
else if (li > 0 && change.op == 'c') li += 1; // ignore leading space if not first word
// output the changed index and text
removed(li, li + lchange.length);
}
if (change.op != 'd') {
// find the starting index of the line
li = d.getLines('rhs').slice(0, change['rhs-line-from']).join(' ').length;
				// get the index of the span of the change
lj = change['rhs-line-to'] + 1;
// get the changed text
var rchange = d.getLines('rhs').slice(change['rhs-line-from'], lj).join(' ');
if (change.op == 'a') rchange += ' ';// include the leading space
else if (li > 0 && change.op == 'c') li += 1; // ignore leading space if not first word
// output the changed index and text
added(li, li + rchange.length);
}
}
}
});
Mgly.CodeifyText = function(settings) {
this._max_code = 0;
this._diff_codes = {};
this.ctxs = {};
this.options = {ignorews: false};
jQuery.extend(this, settings);
this.lhs = settings.lhs.split('\n');
this.rhs = settings.rhs.split('\n');
};
jQuery.extend(Mgly.CodeifyText.prototype, {
getCodes: function(side) {
if (!this.ctxs.hasOwnProperty(side)) {
var ctx = this._diff_ctx(this[side]);
this.ctxs[side] = ctx;
ctx.codes.length = Object.keys(ctx.codes).length;
}
return this.ctxs[side].codes;
},
getLines: function(side) {
return this.ctxs[side].lines;
},
_diff_ctx: function(lines) {
var ctx = {i: 0, codes: {}, lines: lines};
this._codeify(lines, ctx);
return ctx;
},
_codeify: function(lines, ctx) {
var code = this._max_code;
for (var i = 0; i < lines.length; ++i) {
var line = lines[i];
if (this.options.ignorews) {
line = line.replace(/\s+/g, '');
}
var aCode = this._diff_codes[line];
if (aCode != undefined) {
ctx.codes[i] = aCode;
}
else {
this._max_code++;
this._diff_codes[line] = this._max_code;
ctx.codes[i] = this._max_code;
}
}
}
});
Mgly.diff = function(lhs, rhs, options) {
var opts = jQuery.extend({ignorews: false}, options);
this.codeify = new Mgly.CodeifyText({
lhs: lhs,
rhs: rhs,
options: opts
});
var lhs_ctx = {
codes: this.codeify.getCodes('lhs'),
modified: {}
};
var rhs_ctx = {
codes: this.codeify.getCodes('rhs'),
modified: {}
};
var max = (lhs_ctx.codes.length + rhs_ctx.codes.length + 1);
var vector_d = [];
var vector_u = [];
this._lcs(lhs_ctx, 0, lhs_ctx.codes.length, rhs_ctx, 0, rhs_ctx.codes.length, vector_u, vector_d);
this._optimize(lhs_ctx);
this._optimize(rhs_ctx);
this.items = this._create_diffs(lhs_ctx, rhs_ctx);
};
jQuery.extend(Mgly.diff.prototype, {
changes: function() { return this.items; },
getLines: function(side) {
return this.codeify.getLines(side);
},
normal_form: function() {
var nf = '';
for (var index = 0; index < this.items.length; ++index) {
var item = this.items[index];
var lhs_str = '';
var rhs_str = '';
var change = 'c';
if (item.lhs_deleted_count == 0 && item.rhs_inserted_count > 0) change = 'a';
else if (item.lhs_deleted_count > 0 && item.rhs_inserted_count == 0) change = 'd';
if (item.lhs_deleted_count == 1) lhs_str = item.lhs_start + 1;
else if (item.lhs_deleted_count == 0) lhs_str = item.lhs_start;
else lhs_str = (item.lhs_start + 1) + ',' + (item.lhs_start + item.lhs_deleted_count);
if (item.rhs_inserted_count == 1) rhs_str = item.rhs_start + 1;
else if (item.rhs_inserted_count == 0) rhs_str = item.rhs_start;
else rhs_str = (item.rhs_start + 1) + ',' + (item.rhs_start + item.rhs_inserted_count);
nf += lhs_str + change + rhs_str + '\n';
var lhs_lines = this.getLines('lhs');
var rhs_lines = this.getLines('rhs');
if (rhs_lines && lhs_lines) {
var i;
// if rhs/lhs lines have been retained, output contextual diff
for (i = item.lhs_start; i < item.lhs_start + item.lhs_deleted_count; ++i) {
nf += '< ' + lhs_lines[i] + '\n';
}
if (item.rhs_inserted_count && item.lhs_deleted_count) nf += '---\n';
for (i = item.rhs_start; i < item.rhs_start + item.rhs_inserted_count; ++i) {
nf += '> ' + rhs_lines[i] + '\n';
}
}
}
return nf;
},
_lcs: function(lhs_ctx, lhs_lower, lhs_upper, rhs_ctx, rhs_lower, rhs_upper, vector_u, vector_d) {
while ( (lhs_lower < lhs_upper) && (rhs_lower < rhs_upper) && (lhs_ctx.codes[lhs_lower] == rhs_ctx.codes[rhs_lower]) ) {
++lhs_lower;
++rhs_lower;
}
while ( (lhs_lower < lhs_upper) && (rhs_lower < rhs_upper) && (lhs_ctx.codes[lhs_upper - 1] == rhs_ctx.codes[rhs_upper - 1]) ) {
--lhs_upper;
--rhs_upper;
}
if (lhs_lower == lhs_upper) {
while (rhs_lower < rhs_upper) {
rhs_ctx.modified[ rhs_lower++ ] = true;
}
}
else if (rhs_lower == rhs_upper) {
while (lhs_lower < lhs_upper) {
lhs_ctx.modified[ lhs_lower++ ] = true;
}
}
else {
var sms = this._sms(lhs_ctx, lhs_lower, lhs_upper, rhs_ctx, rhs_lower, rhs_upper, vector_u, vector_d);
this._lcs(lhs_ctx, lhs_lower, sms.x, rhs_ctx, rhs_lower, sms.y, vector_u, vector_d);
this._lcs(lhs_ctx, sms.x, lhs_upper, rhs_ctx, sms.y, rhs_upper, vector_u, vector_d);
}
},
_sms: function(lhs_ctx, lhs_lower, lhs_upper, rhs_ctx, rhs_lower, rhs_upper, vector_u, vector_d) {
var max = lhs_ctx.codes.length + rhs_ctx.codes.length + 1;
var kdown = lhs_lower - rhs_lower;
var kup = lhs_upper - rhs_upper;
var delta = (lhs_upper - lhs_lower) - (rhs_upper - rhs_lower);
var odd = (delta & 1) != 0;
var offset_down = max - kdown;
var offset_up = max - kup;
var maxd = ((lhs_upper - lhs_lower + rhs_upper - rhs_lower) / 2) + 1;
vector_d[ offset_down + kdown + 1 ] = lhs_lower;
vector_u[ offset_up + kup - 1 ] = lhs_upper;
var ret = {x:0,y:0}, d, k, x, y;
for (d = 0; d <= maxd; ++d) {
for (k = kdown - d; k <= kdown + d; k += 2) {
if (k == kdown - d) {
x = vector_d[ offset_down + k + 1 ];//down
}
else {
x = vector_d[ offset_down + k - 1 ] + 1;//right
if ((k < (kdown + d)) && (vector_d[ offset_down + k + 1 ] >= x)) {
x = vector_d[ offset_down + k + 1 ];//down
}
}
y = x - k;
// find the end of the furthest reaching forward D-path in diagonal k.
while ((x < lhs_upper) && (y < rhs_upper) && (lhs_ctx.codes[x] == rhs_ctx.codes[y])) {
x++; y++;
}
vector_d[ offset_down + k ] = x;
// overlap ?
if (odd && (kup - d < k) && (k < kup + d)) {
if (vector_u[offset_up + k] <= vector_d[offset_down + k]) {
ret.x = vector_d[offset_down + k];
ret.y = vector_d[offset_down + k] - k;
return (ret);
}
}
}
// Extend the reverse path.
for (k = kup - d; k <= kup + d; k += 2) {
// find the only or better starting point
if (k == kup + d) {
x = vector_u[offset_up + k - 1]; // up
} else {
x = vector_u[offset_up + k + 1] - 1; // left
if ((k > kup - d) && (vector_u[offset_up + k - 1] < x))
x = vector_u[offset_up + k - 1]; // up
}
y = x - k;
while ((x > lhs_lower) && (y > rhs_lower) && (lhs_ctx.codes[x - 1] == rhs_ctx.codes[y - 1])) {
// diagonal
x--;
y--;
}
vector_u[offset_up + k] = x;
// overlap ?
if (!odd && (kdown - d <= k) && (k <= kdown + d)) {
if (vector_u[offset_up + k] <= vector_d[offset_down + k]) {
ret.x = vector_d[offset_down + k];
ret.y = vector_d[offset_down + k] - k;
return (ret);
}
}
}
}
throw "the algorithm should never come here.";
},
_optimize: function(ctx) {
var start = 0, end = 0;
while (start < ctx.length) {
while ((start < ctx.length) && (ctx.modified[start] == undefined || ctx.modified[start] == false)) {
start++;
}
end = start;
while ((end < ctx.length) && (ctx.modified[end] == true)) {
end++;
}
if ((end < ctx.length) && (ctx.ctx[start] == ctx.codes[end])) {
ctx.modified[start] = false;
ctx.modified[end] = true;
}
else {
start = end;
}
}
},
_create_diffs: function(lhs_ctx, rhs_ctx) {
var items = [];
var lhs_start = 0, rhs_start = 0;
var lhs_line = 0, rhs_line = 0;
while (lhs_line < lhs_ctx.codes.length || rhs_line < rhs_ctx.codes.length) {
if ((lhs_line < lhs_ctx.codes.length) && (!lhs_ctx.modified[lhs_line])
&& (rhs_line < rhs_ctx.codes.length) && (!rhs_ctx.modified[rhs_line])) {
// equal lines
lhs_line++;
rhs_line++;
}
else {
// maybe deleted and/or inserted lines
lhs_start = lhs_line;
rhs_start = rhs_line;
while (lhs_line < lhs_ctx.codes.length && (rhs_line >= rhs_ctx.codes.length || lhs_ctx.modified[lhs_line]))
lhs_line++;
while (rhs_line < rhs_ctx.codes.length && (lhs_line >= lhs_ctx.codes.length || rhs_ctx.modified[rhs_line]))
rhs_line++;
if ((lhs_start < lhs_line) || (rhs_start < rhs_line)) {
// store a new difference-item
items.push({
lhs_start: lhs_start,
rhs_start: rhs_start,
lhs_deleted_count: lhs_line - lhs_start,
rhs_inserted_count: rhs_line - rhs_start
});
}
}
}
return items;
}
});
Mgly.mergely = function(el, options) {
if (el) {
this.init(el, options);
}
};
jQuery.extend(Mgly.mergely.prototype, {
name: 'mergely',
//http://jupiterjs.com/news/writing-the-perfect-jquery-plugin
init: function(el, options) {
this.diffView = new Mgly.CodeMirrorDiffView(el, options);
this.bind(el);
},
bind: function(el) {
this.diffView.bind(el);
}
});
Mgly.CodeMirrorDiffView = function(el, options) {
CodeMirror.defineExtension('centerOnCursor', function() {
var coords = this.cursorCoords(null, 'local');
this.scrollTo(null,
(coords.y + coords.yBot) / 2 - (this.getScrollerElement().clientHeight / 2));
});
this.init(el, options);
};
jQuery.extend(Mgly.CodeMirrorDiffView.prototype, {
init: function(el, options) {
this.settings = {
autoupdate: true,
autoresize: true,
rhs_margin: 'right',
lcs: true,
sidebar: true,
viewport: false,
ignorews: false,
fadein: 'fast',
editor_width: '650px',
editor_height: '400px',
resize_timeout: 500,
change_timeout: 150,
fgcolor: {a:'#4ba3fa',c:'#a3a3a3',d:'#ff7f7f', // color for differences (soft color)
ca:'#4b73ff',cc:'#434343',cd:'#ff4f4f'}, // color for currently active difference (bright color)
bgcolor: '#eee',
vpcolor: 'rgba(0, 0, 200, 0.5)',
lhs: function(setValue) { },
rhs: function(setValue) { },
loaded: function() { },
_auto_width: function(w) { return w; },
resize: function(init) {
var scrollbar = init ? 16 : 0;
var w = jQuery(el).parent().width() + scrollbar, h = 0;
if (this.width == 'auto') {
w = this._auto_width(w);
}
else {
w = this.width;
this.editor_width = w;
}
if (this.height == 'auto') {
//h = this._auto_height(h);
h = jQuery(el).parent().height();
}
else {
h = this.height;
this.editor_height = h;
}
var content_width = w / 2.0 - 2 * 8 - 8;
var content_height = h;
var self = jQuery(el);
self.find('.mergely-column').css({ width: content_width + 'px' });
self.find('.mergely-column, .mergely-canvas, .mergely-margin, .mergely-column textarea, .CodeMirror-scroll, .cm-s-default').css({ height: content_height + 'px' });
self.find('.mergely-canvas').css({ height: content_height + 'px' });
self.find('.mergely-column textarea').css({ width: content_width + 'px' });
self.css({ width: w, height: h, clear: 'both' });
if (self.css('display') == 'none') {
if (this.fadein != false) self.fadeIn(this.fadein);
else self.show();
if (this.loaded) this.loaded();
}
if (this.resized) this.resized();
},
_debug: '', //scroll,draw,calc,diff,markup,change
resized: function() { }
};
var cmsettings = {
mode: 'text/plain',
readOnly: false,
lineWrapping: false,
lineNumbers: true,
gutters: ['merge', 'CodeMirror-linenumbers']
};
this.lhs_cmsettings = {};
this.rhs_cmsettings = {};
// save this element for faster queries
this.element = jQuery(el);
// save options if there are any
if (options && options.cmsettings) jQuery.extend(this.lhs_cmsettings, cmsettings, options.cmsettings, options.lhs_cmsettings);
if (options && options.cmsettings) jQuery.extend(this.rhs_cmsettings, cmsettings, options.cmsettings, options.rhs_cmsettings);
//if (options) jQuery.extend(this.settings, options);
// bind if the element is destroyed
this.element.bind('destroyed', jQuery.proxy(this.teardown, this));
// save this instance in jQuery data, binding this view to the node
jQuery.data(el, 'mergely', this);
this._setOptions(options);
},
unbind: function() {
if (this.changed_timeout != null) clearTimeout(this.changed_timeout);
this.editor[this.id + '-lhs'].toTextArea();
this.editor[this.id + '-rhs'].toTextArea();
},
destroy: function() {
this.element.unbind('destroyed', this.teardown);
this.teardown();
},
teardown: function() {
this.unbind();
},
lhs: function(text) {
this.editor[this.id + '-lhs'].setValue(text);
},
rhs: function(text) {
this.editor[this.id + '-rhs'].setValue(text);
},
update: function() {
this._changing(this.id + '-lhs', this.id + '-rhs');
},
unmarkup: function() {
this._clear();
},
scrollToDiff: function(direction) {
if (!this.changes.length) return;
if (direction == 'next') {
this._current_diff = Math.min(++this._current_diff, this.changes.length - 1);
}
else {
this._current_diff = Math.max(--this._current_diff, 0);
}
this._scroll_to_change(this.changes[this._current_diff]);
this._changed(this.id + '-lhs', this.id + '-rhs');
},
mergeCurrentChange: function(side) {
if (!this.changes.length) return;
if (side == 'lhs' && !this.lhs_cmsettings.readOnly) {
this._merge_change(this.changes[this._current_diff], 'rhs', 'lhs');
}
else if (side == 'rhs' && !this.rhs_cmsettings.readOnly) {
this._merge_change(this.changes[this._current_diff], 'lhs', 'rhs');
}
},
scrollTo: function(side, num) {
var le = this.editor[this.id + '-lhs'];
var re = this.editor[this.id + '-rhs'];
if (side == 'lhs') {
le.setCursor(num);
le.centerOnCursor();
}
else {
re.setCursor(num);
re.centerOnCursor();
}
},
_setOptions: function(opts) {
jQuery.extend(this.settings, opts);
if (this.settings.hasOwnProperty('rhs_margin')) {
// dynamically swap the margin
if (this.settings.rhs_margin == 'left') {
this.element.find('.mergely-margin:last-child').insertAfter(
this.element.find('.mergely-canvas'));
}
else {
var target = this.element.find('.mergely-margin').last();
target.appendTo(target.parent());
}
}
if (this.settings.hasOwnProperty('sidebar')) {
// dynamically enable sidebars
if (this.settings.sidebar) {
jQuery(this.element).find('.mergely-margin').css({display: 'block'});
}
else {
jQuery(this.element).find('.mergely-margin').css({display: 'none'});
}
}
},
options: function(opts) {
if (opts) {
this._setOptions(opts);
if (this.settings.autoresize) this.resize();
if (this.settings.autoupdate) this.update();
}
else {
return this.settings;
}
},
swap: function() {
if (this.lhs_cmsettings.readOnly || this.rhs_cmsettings.readOnly) return;
var le = this.editor[this.id + '-lhs'];
var re = this.editor[this.id + '-rhs'];
var tmp = re.getValue();
re.setValue(le.getValue());
le.setValue(tmp);
},
merge: function(side) {
var le = this.editor[this.id + '-lhs'];
var re = this.editor[this.id + '-rhs'];
if (side == 'lhs' && !this.lhs_cmsettings.readOnly) le.setValue(re.getValue());
else if (!this.rhs_cmsettings.readOnly) re.setValue(le.getValue());
},
get: function(side) {
var ed = this.editor[this.id + '-' + side];
var t = ed.getValue();
if (t == undefined) return '';
return t;
},
clear: function(side) {
if (side == 'lhs' && this.lhs_cmsettings.readOnly) return;
if (side == 'rhs' && this.rhs_cmsettings.readOnly) return;
var ed = this.editor[this.id + '-' + side];
ed.setValue('');
},
cm: function(side) {
return this.editor[this.id + '-' + side];
},
search: function(side, query, direction) {
var le = this.editor[this.id + '-lhs'];
var re = this.editor[this.id + '-rhs'];
var editor;
if (side == 'lhs') editor = le;
else editor = re;
direction = (direction == 'prev') ? 'findPrevious' : 'findNext';
if ((editor.getSelection().length == 0) || (this.prev_query[side] != query)) {
this.cursor[this.id] = editor.getSearchCursor(query, { line: 0, ch: 0 }, false);
this.prev_query[side] = query;
}
var cursor = this.cursor[this.id];
if (cursor[direction]()) {
editor.setSelection(cursor.from(), cursor.to());
}
else {
cursor = editor.getSearchCursor(query, { line: 0, ch: 0 }, false);
}
},
resize: function() {
this.settings.resize();
this._changing(this.id + '-lhs', this.id + '-rhs');
this._set_top_offset(this.id + '-lhs');
},
diff: function() {
var lhs = this.editor[this.id + '-lhs'].getValue();
var rhs = this.editor[this.id + '-rhs'].getValue();
var d = new Mgly.diff(lhs, rhs, this.settings);
return d.normal_form();
},
bind: function(el) {
this.element.hide();//hide
this.id = jQuery(el).attr('id');
this.changed_timeout = null;
this.chfns = {};
this.chfns[this.id + '-lhs'] = [];
this.chfns[this.id + '-rhs'] = [];
this.prev_query = [];
this.cursor = [];
this._skipscroll = {};
this.change_exp = new RegExp(/(\d+(?:,\d+)?)([acd])(\d+(?:,\d+)?)/);
var merge_lhs_button;
var merge_rhs_button;
if (jQuery.button != undefined) {
//jquery ui
merge_lhs_button = '<button title="Merge left"></button>';
merge_rhs_button = '<button title="Merge right"></button>';
}
else {
// homebrew
			var style = 'opacity:0.4;width:10px;height:15px;background-color:#888;cursor:pointer;text-align:center;color:#eee;border:1px solid #222;margin-right:5px;margin-top: -2px;';
merge_lhs_button = '<div style="' + style + '" title="Merge left"><</div>';
merge_rhs_button = '<div style="' + style + '" title="Merge right">></div>';
}
this.merge_rhs_button = jQuery(merge_rhs_button);
this.merge_lhs_button = jQuery(merge_lhs_button);
// create the textarea and canvas elements
var height = this.settings.editor_height;
var width = this.settings.editor_width;
this.element.append(jQuery('<div class="mergely-margin" style="height: ' + height + '"><canvas id="' + this.id + '-lhs-margin" width="8px" height="' + height + '"></canvas></div>'));
this.element.append(jQuery('<div style="position:relative;width:' + width + '; height:' + height + '" id="' + this.id + '-editor-lhs" class="mergely-column"><textarea style="" id="' + this.id + '-lhs"></textarea></div>'));
this.element.append(jQuery('<div class="mergely-canvas" style="height: ' + height + '"><canvas id="' + this.id + '-lhs-' + this.id + '-rhs-canvas" style="width:28px" width="28px" height="' + height + '"></canvas></div>'));
var rmargin = jQuery('<div class="mergely-margin" style="height: ' + height + '"><canvas id="' + this.id + '-rhs-margin" width="8px" height="' + height + '"></canvas></div>');
if (!this.settings.sidebar) {
this.element.find('.mergely-margin').css({display: 'none'});
}
if (this.settings.rhs_margin == 'left') {
this.element.append(rmargin);
}
this.element.append(jQuery('<div style="width:' + width + '; height:' + height + '" id="' + this.id + '-editor-rhs" class="mergely-column"><textarea style="" id="' + this.id + '-rhs"></textarea></div>'));
if (this.settings.rhs_margin != 'left') {
this.element.append(rmargin);
}
//codemirror
var cmstyle = '#' + this.id + ' .CodeMirror-gutter-text { padding: 5px 0 0 0; }' +
'#' + this.id + ' .CodeMirror-lines pre, ' + '#' + this.id + ' .CodeMirror-gutter-text pre { line-height: 18px; }' +
'.CodeMirror-linewidget { overflow: hidden; };';
if (this.settings.autoresize) {
			cmstyle += '#' + this.id + ' .CodeMirror-scroll { height: 100%; overflow: auto; }';
}
// adjust the margin line height
cmstyle += '\n.CodeMirror { line-height: 18px; }';
jQuery('<style type="text/css">' + cmstyle + '</style>').appendTo('head');
//bind
var rhstx = jQuery('#' + this.id + '-rhs').get(0);
if (!rhstx) {
console.error('rhs textarea not defined - Mergely not initialized properly');
return;
}
var lhstx = jQuery('#' + this.id + '-lhs').get(0);
		if (!lhstx) {
console.error('lhs textarea not defined - Mergely not initialized properly');
return;
}
var self = this;
this.editor = [];
this.editor[this.id + '-lhs'] = CodeMirror.fromTextArea(lhstx, this.lhs_cmsettings);
this.editor[this.id + '-rhs'] = CodeMirror.fromTextArea(rhstx, this.rhs_cmsettings);
this.editor[this.id + '-lhs'].on('change', function(){ if (self.settings.autoupdate) self._changing(self.id + '-lhs', self.id + '-rhs'); });
this.editor[this.id + '-lhs'].on('scroll', function(){ self._scrolling(self.id + '-lhs'); });
this.editor[this.id + '-rhs'].on('change', function(){ if (self.settings.autoupdate) self._changing(self.id + '-lhs', self.id + '-rhs'); });
this.editor[this.id + '-rhs'].on('scroll', function(){ self._scrolling(self.id + '-rhs'); });
// resize
if (this.settings.autoresize) {
var sz_timeout1 = null;
function sz(init) {
//self.em_height = null; //recalculate
if (self.settings.resize) self.settings.resize(init);
self.editor[self.id + '-lhs'].refresh();
self.editor[self.id + '-rhs'].refresh();
if (self.settings.autoupdate) {
self._changing(self.id + '-lhs', self.id + '-rhs');
}
};
jQuery(window).resize(
function () {
if (sz_timeout1) clearTimeout(sz_timeout1);
sz_timeout1 = setTimeout(sz, self.settings.resize_timeout);
}
);
sz(true);
}
//bind
var setv;
if (this.settings.lhs) {
setv = this.editor[this.id + '-lhs'].getDoc().setValue;
this.settings.lhs(setv.bind(this.editor[this.id + '-lhs'].getDoc()));
}
if (this.settings.rhs) {
setv = this.editor[this.id + '-rhs'].getDoc().setValue;
this.settings.rhs(setv.bind(this.editor[this.id + '-rhs'].getDoc()));
}
},
_scroll_to_change : function(change) {
if (!change) return;
var self = this;
var led = self.editor[self.id+'-lhs'];
var red = self.editor[self.id+'-rhs'];
var yref = led.getScrollerElement().offsetHeight * 0.5; // center between >0 and 1/2
// set cursors
led.setCursor(Math.max(change["lhs-line-from"],0), 0); // use led.getCursor().ch ?
red.setCursor(Math.max(change["rhs-line-from"],0), 0);
// using directly CodeMirror breaks canvas alignment
// var ly = led.charCoords({line: Math.max(change["lhs-line-from"],0), ch: 0}, "local").top;
// calculate scroll offset for current change. Warning: returns relative y position so we scroll to 0 first.
led.scrollTo(null, 0);
red.scrollTo(null, 0);
self._calculate_offsets(self.id+'-lhs', self.id+'-rhs', [change]);
led.scrollTo(null, Math.max(change["lhs-y-start"]-yref, 0));
red.scrollTo(null, Math.max(change["rhs-y-start"]-yref, 0));
// right pane should simply follows
},
_scrolling: function(editor_name) {
if (this._skipscroll[editor_name] === true) {
// scrolling one side causes the other to event - ignore it
this._skipscroll[editor_name] = false;
return;
}
var scroller = jQuery(this.editor[editor_name].getScrollerElement());
if (this.midway == undefined) {
this.midway = (scroller.height() / 2.0 + scroller.offset().top).toFixed(2);
}
// balance-line
var midline = this.editor[editor_name].coordsChar({left:0, top:this.midway});
var top_to = scroller.scrollTop();
var left_to = scroller.scrollLeft();
this.trace('scroll', 'side', editor_name);
this.trace('scroll', 'midway', this.midway);
this.trace('scroll', 'midline', midline);
this.trace('scroll', 'top_to', top_to);
this.trace('scroll', 'left_to', left_to);
var editor_name1 = this.id + '-lhs';
var editor_name2 = this.id + '-rhs';
for (var name in this.editor) {
if (!this.editor.hasOwnProperty(name)) continue;
if (editor_name == name) continue; //same editor
var this_side = editor_name.replace(this.id + '-', '');
var other_side = name.replace(this.id + '-', '');
var top_adjust = 0;
// find the last change that is less than or within the midway point
// do not move the rhs until the lhs end point is >= the rhs end point.
var last_change = null;
var force_scroll = false;
for (var i = 0; i < this.changes.length; ++i) {
var change = this.changes[i];
if ((midline.line >= change[this_side+'-line-from'])) {
last_change = change;
if (midline.line >= last_change[this_side+'-line-to']) {
if (!change.hasOwnProperty(this_side+'-y-start') ||
!change.hasOwnProperty(this_side+'-y-end') ||
!change.hasOwnProperty(other_side+'-y-start') ||
!change.hasOwnProperty(other_side+'-y-end')){
// change outside of viewport
force_scroll = true;
}
else {
top_adjust +=
(change[this_side+'-y-end'] - change[this_side+'-y-start']) -
(change[other_side+'-y-end'] - change[other_side+'-y-start']);
}
}
}
}
var vp = this.editor[name].getViewport();
var scroll = true;
if (last_change) {
this.trace('scroll', 'last change before midline', last_change);
if (midline.line >= vp.from && midline <= vp.to) {
scroll = false;
}
}
this.trace('scroll', 'scroll', scroll);
if (scroll || force_scroll) {
// scroll the other side
this.trace('scroll', 'scrolling other side', top_to - top_adjust);
this._skipscroll[name] = true;//disable next event
this.editor[name].scrollTo(left_to, top_to - top_adjust);
}
else this.trace('scroll', 'not scrolling other side');
if (this.settings.autoupdate) {
var timer = new Mgly.Timer();
this._calculate_offsets(editor_name1, editor_name2, this.changes);
this.trace('change', 'offsets time', timer.stop());
this._markup_changes(editor_name1, editor_name2, this.changes);
this.trace('change', 'markup time', timer.stop());
this._draw_diff(editor_name1, editor_name2, this.changes);
this.trace('change', 'draw time', timer.stop());
}
this.trace('scroll', 'scrolled');
}
},
_changing: function(editor_name1, editor_name2) {
this.trace('change', 'changing-timeout', this.changed_timeout);
var self = this;
if (this.changed_timeout != null) clearTimeout(this.changed_timeout);
this.changed_timeout = setTimeout(function(){
var timer = new Mgly.Timer();
self._changed(editor_name1, editor_name2);
self.trace('change', 'total time', timer.stop());
}, this.settings.change_timeout);
},
_changed: function(editor_name1, editor_name2) {
this._clear();
this._diff(editor_name1, editor_name2);
},
_clear: function() {
var self = this, name, editor, fns, timer, i, change, l;
function clear_changes() {
timer = new Mgly.Timer();
for (i = 0, l = editor.lineCount(); i < l; ++i) {
editor.removeLineClass(i, 'background');
}
for (i = 0; i < fns.length; ++i) {
//var edid = editor.getDoc().id;
change = fns[i];
//if (change.doc.id != edid) continue;
if (change.lines.length) {
self.trace('change', 'clear text', change.lines[0].text);
}
change.clear();
}
editor.clearGutter('merge');
self.trace('change', 'clear time', timer.stop());
};
for (name in this.editor) {
if (!this.editor.hasOwnProperty(name)) continue;
editor = this.editor[name];
fns = self.chfns[name];
// clear editor changes
editor.operation(clear_changes);
}
self.chfns[name] = [];
var ex = this._draw_info(this.id + '-lhs', this.id + '-rhs');
var ctx_lhs = ex.clhs.get(0).getContext('2d');
var ctx_rhs = ex.crhs.get(0).getContext('2d');
var ctx = ex.dcanvas.getContext('2d');
ctx_lhs.beginPath();
ctx_lhs.fillStyle = this.settings.bgcolor;
ctx_lhs.strokeStyle = '#888';
ctx_lhs.fillRect(0, 0, 6.5, ex.visible_page_height);
ctx_lhs.strokeRect(0, 0, 6.5, ex.visible_page_height);
ctx_rhs.beginPath();
ctx_rhs.fillStyle = this.settings.bgcolor;
ctx_rhs.strokeStyle = '#888';
ctx_rhs.fillRect(0, 0, 6.5, ex.visible_page_height);
ctx_rhs.strokeRect(0, 0, 6.5, ex.visible_page_height);
ctx.beginPath();
ctx.fillStyle = '#fff';
ctx.fillRect(0, 0, this.draw_mid_width, ex.visible_page_height);
},
_diff: function(editor_name1, editor_name2) {
var lhs = this.editor[editor_name1].getValue();
var rhs = this.editor[editor_name2].getValue();
var timer = new Mgly.Timer();
var d = new Mgly.diff(lhs, rhs, this.settings);
this.trace('change', 'diff time', timer.stop());
this.changes = Mgly.DiffParser(d.normal_form());
this.trace('change', 'parse time', timer.stop());
if (this._current_diff === undefined) {
// go to first difference on start-up
this._current_diff = 0;
this._scroll_to_change(this.changes[0]);
}
this.trace('change', 'scroll_to_change time', timer.stop());
this._calculate_offsets(editor_name1, editor_name2, this.changes);
this.trace('change', 'offsets time', timer.stop());
this._markup_changes(editor_name1, editor_name2, this.changes);
this.trace('change', 'markup time', timer.stop());
this._draw_diff(editor_name1, editor_name2, this.changes);
this.trace('change', 'draw time', timer.stop());
},
_parse_diff: function (editor_name1, editor_name2, diff) {
this.trace('diff', 'diff results:\n', diff);
var changes = [];
var change_id = 0;
// parse diff
var diff_lines = diff.split(/\n/);
for (var i = 0; i < diff_lines.length; ++i) {
if (diff_lines[i].length == 0) continue;
var change = {};
var test = this.change_exp.exec(diff_lines[i]);
if (test == null) continue;
// lines are zero-based
var fr = test[1].split(',');
change['lhs-line-from'] = fr[0] - 1;
if (fr.length == 1) change['lhs-line-to'] = fr[0] - 1;
else change['lhs-line-to'] = fr[1] - 1;
var to = test[3].split(',');
change['rhs-line-from'] = to[0] - 1;
if (to.length == 1) change['rhs-line-to'] = to[0] - 1;
else change['rhs-line-to'] = to[1] - 1;
// TODO: optimize for changes that are adds/removes
if (change['lhs-line-from'] < 0) change['lhs-line-from'] = 0;
if (change['lhs-line-to'] < 0) change['lhs-line-to'] = 0;
if (change['rhs-line-from'] < 0) change['rhs-line-from'] = 0;
if (change['rhs-line-to'] < 0) change['rhs-line-to'] = 0;
change['op'] = test[2];
changes[change_id++] = change;
this.trace('diff', 'change', change);
}
return changes;
},
_get_viewport: function(editor_name1, editor_name2) {
var lhsvp = this.editor[editor_name1].getViewport();
var rhsvp = this.editor[editor_name2].getViewport();
return {from: Math.min(lhsvp.from, rhsvp.from), to: Math.max(lhsvp.to, rhsvp.to)};
},
_is_change_in_view: function(vp, change) {
if (!this.settings.viewport) return true;
if ((change['lhs-line-from'] < vp.from && change['lhs-line-to'] < vp.to) ||
(change['lhs-line-from'] > vp.from && change['lhs-line-to'] > vp.to) ||
(change['rhs-line-from'] < vp.from && change['rhs-line-to'] < vp.to) ||
(change['rhs-line-from'] > vp.from && change['rhs-line-to'] > vp.to)) {
// if the change is outside the viewport, skip
return false;
}
return true;
},
_set_top_offset: function (editor_name1) {
// save the current scroll position of the editor
var saveY = this.editor[editor_name1].getScrollInfo().top;
// temporarily scroll to top
this.editor[editor_name1].scrollTo(null, 0);
// this is the distance from the top of the screen to the top of the
// content of the first codemirror editor
var topnode = jQuery('#' + this.id + ' .CodeMirror-measure').first();
var top_offset = topnode.offset().top - 4;
if(!top_offset) return false;
// restore editor's scroll position
this.editor[editor_name1].scrollTo(null, saveY);
this.draw_top_offset = 0.5 - top_offset;
return true;
},
_calculate_offsets: function (editor_name1, editor_name2, changes) {
if (this.em_height == null) {
if(!this._set_top_offset(editor_name1)) return; // bail out; will be retried on the next change
this.em_height = this.editor[editor_name1].defaultTextHeight();
if (!this.em_height) {
console.warn('Failed to calculate offsets, using 18 by default');
this.em_height = 18;
}
this.draw_lhs_min = 0.5;
var c = jQuery('#' + editor_name1 + '-' + editor_name2 + '-canvas');
if (!c.length) {
console.error('failed to find canvas', '#' + editor_name1 + '-' + editor_name2 + '-canvas');
}
if (!c.width()) {
console.error('canvas width is 0');
return;
}
this.draw_mid_width = jQuery('#' + editor_name1 + '-' + editor_name2 + '-canvas').width();
this.draw_rhs_max = this.draw_mid_width - 0.5; //24.5;
this.draw_lhs_width = 5;
this.draw_rhs_width = 5;
this.trace('calc', 'change offsets calculated', {top_offset: this.draw_top_offset, lhs_min: this.draw_lhs_min, rhs_max: this.draw_rhs_max, lhs_width: this.draw_lhs_width, rhs_width: this.draw_rhs_width});
}
var lhschc = this.editor[editor_name1].charCoords({line: 0});
var rhschc = this.editor[editor_name2].charCoords({line: 0});
var vp = this._get_viewport(editor_name1, editor_name2);
for (var i = 0; i < changes.length; ++i) {
var change = changes[i];
if (!this.settings.sidebar && !this._is_change_in_view(vp, change)) {
// if the change is outside the viewport, skip
delete change['lhs-y-start'];
delete change['lhs-y-end'];
delete change['rhs-y-start'];
delete change['rhs-y-end'];
continue;
}
var llf = change['lhs-line-from'] >= 0 ? change['lhs-line-from'] : 0;
var llt = change['lhs-line-to'] >= 0 ? change['lhs-line-to'] : 0;
var rlf = change['rhs-line-from'] >= 0 ? change['rhs-line-from'] : 0;
var rlt = change['rhs-line-to'] >= 0 ? change['rhs-line-to'] : 0;
var ls, le, rs, re, tls, tle, lhseh, lhssh, rhssh, rhseh;
if (this.editor[editor_name1].getOption('lineWrapping') || this.editor[editor_name2].getOption('lineWrapping')) {
// If using line-wrapping, we must get the height of the line
tls = this.editor[editor_name1].cursorCoords({line: llf, ch: 0}, 'page');
lhssh = this.editor[editor_name1].getLineHandle(llf);
ls = { top: tls.top, bottom: tls.top + lhssh.height };
tle = this.editor[editor_name1].cursorCoords({line: llt, ch: 0}, 'page');
lhseh = this.editor[editor_name1].getLineHandle(llt);
le = { top: tle.top, bottom: tle.top + lhseh.height };
tls = this.editor[editor_name2].cursorCoords({line: rlf, ch: 0}, 'page');
rhssh = this.editor[editor_name2].getLineHandle(rlf);
rs = { top: tls.top, bottom: tls.top + rhssh.height };
tle = this.editor[editor_name2].cursorCoords({line: rlt, ch: 0}, 'page');
rhseh = this.editor[editor_name2].getLineHandle(rlt);
re = { top: tle.top, bottom: tle.top + rhseh.height };
}
else {
// If not using line-wrapping, we can calculate the line position
ls = {
top: lhschc.top + llf * this.em_height,
bottom: lhschc.bottom + llf * this.em_height + 2
};
le = {
top: lhschc.top + llt * this.em_height,
bottom: lhschc.bottom + llt * this.em_height + 2
};
rs = {
top: rhschc.top + rlf * this.em_height,
bottom: rhschc.bottom + rlf * this.em_height + 2
};
re = {
top: rhschc.top + rlt * this.em_height,
bottom: rhschc.bottom + rlt * this.em_height + 2
};
}
if (change['op'] == 'a') {
// adds (right), normally start from the end of the lhs,
// except for the case when the start of the rhs is 0
if (rlf > 0) {
ls.top = ls.bottom;
ls.bottom += this.em_height;
le = ls;
}
}
else if (change['op'] == 'd') {
// deletes (left) normally finish from the end of the rhs,
// except for the case when the start of the lhs is 0
if (llf > 0) {
rs.top = rs.bottom;
rs.bottom += this.em_height;
re = rs;
}
}
change['lhs-y-start'] = this.draw_top_offset + ls.top;
if (change['op'] == 'c' || change['op'] == 'd') {
change['lhs-y-end'] = this.draw_top_offset + le.bottom;
}
else {
change['lhs-y-end'] = this.draw_top_offset + le.top;
}
change['rhs-y-start'] = this.draw_top_offset + rs.top;
if (change['op'] == 'c' || change['op'] == 'a') {
change['rhs-y-end'] = this.draw_top_offset + re.bottom;
}
else {
change['rhs-y-end'] = this.draw_top_offset + re.top;
}
this.trace('calc', 'change calculated', i, change);
}
return changes;
},
_markup_changes: function (editor_name1, editor_name2, changes) {
jQuery('.merge-button').remove(); // clear
var self = this;
var led = this.editor[editor_name1];
var red = this.editor[editor_name2];
var timer = new Mgly.Timer();
led.operation(function() {
for (var i = 0; i < changes.length; ++i) {
var change = changes[i];
var llf = change['lhs-line-from'] >= 0 ? change['lhs-line-from'] : 0;
var llt = change['lhs-line-to'] >= 0 ? change['lhs-line-to'] : 0;
var rlf = change['rhs-line-from'] >= 0 ? change['rhs-line-from'] : 0;
var rlt = change['rhs-line-to'] >= 0 ? change['rhs-line-to'] : 0;
var clazz = ['mergely', 'lhs', change['op'], 'cid-' + i];
led.addLineClass(llf, 'background', 'start');
led.addLineClass(llt, 'background', 'end');
if (llf == 0 && llt == 0 && rlf == 0) {
led.addLineClass(llf, 'background', clazz.join(' '));
led.addLineClass(llf, 'background', 'first');
}
else {
// apply change for each line in-between the changed lines
for (var j = llf; j <= llt; ++j) {
led.addLineClass(j, 'background', clazz.join(' '));
led.addLineClass(j, 'background', clazz.join(' '));
}
}
if (!red.getOption('readOnly')) {
// add widgets to lhs, if rhs is not read only
var rhs_button = self.merge_rhs_button.clone();
if (rhs_button.button) {
//jquery-ui support
rhs_button.button({icons: {primary: 'ui-icon-triangle-1-e'}, text: false});
}
rhs_button.addClass('merge-button');
rhs_button.attr('id', 'merge-rhs-' + i);
led.setGutterMarker(llf, 'merge', rhs_button.get(0));
}
}
});
var vp = this._get_viewport(editor_name1, editor_name2);
this.trace('change', 'markup lhs-editor time', timer.stop());
red.operation(function() {
for (var i = 0; i < changes.length; ++i) {
var change = changes[i];
var llf = change['lhs-line-from'] >= 0 ? change['lhs-line-from'] : 0;
var llt = change['lhs-line-to'] >= 0 ? change['lhs-line-to'] : 0;
var rlf = change['rhs-line-from'] >= 0 ? change['rhs-line-from'] : 0;
var rlt = change['rhs-line-to'] >= 0 ? change['rhs-line-to'] : 0;
if (!self._is_change_in_view(vp, change)) {
// if the change is outside the viewport, skip
continue;
}
var clazz = ['mergely', 'rhs', change['op'], 'cid-' + i];
red.addLineClass(rlf, 'background', 'start');
red.addLineClass(rlt, 'background', 'end');
if (rlf == 0 && rlt == 0 && llf == 0) {
red.addLineClass(rlf, 'background', clazz.join(' '));
red.addLineClass(rlf, 'background', 'first');
}
else {
// apply change for each line in-between the changed lines
for (var j = rlf; j <= rlt; ++j) {
red.addLineClass(j, 'background', clazz.join(' '));
red.addLineClass(j, 'background', clazz.join(' '));
}
}
if (!led.getOption('readOnly')) {
// add widgets to rhs, if lhs is not read only
var lhs_button = self.merge_lhs_button.clone();
if (lhs_button.button) {
//jquery-ui support
lhs_button.button({icons: {primary: 'ui-icon-triangle-1-w'}, text: false});
}
lhs_button.addClass('merge-button');
lhs_button.attr('id', 'merge-lhs-' + i);
red.setGutterMarker(rlf, 'merge', lhs_button.get(0));
}
}
});
this.trace('change', 'markup rhs-editor time', timer.stop());
// mark text deleted, LCS changes
var marktext = [], i, j, k, p;
for (i = 0; this.settings.lcs && i < changes.length; ++i) {
var change = changes[i];
var llf = change['lhs-line-from'] >= 0 ? change['lhs-line-from'] : 0;
var llt = change['lhs-line-to'] >= 0 ? change['lhs-line-to'] : 0;
var rlf = change['rhs-line-from'] >= 0 ? change['rhs-line-from'] : 0;
var rlt = change['rhs-line-to'] >= 0 ? change['rhs-line-to'] : 0;
if (!this._is_change_in_view(vp, change)) {
// if the change is outside the viewport, skip
continue;
}
if (change['op'] == 'd') {
// apply delete to cross-out (left-hand side only)
var from = llf;
var to = llt;
var to_ln = led.lineInfo(to);
if (to_ln) {
marktext.push([led, {line:from, ch:0}, {line:to, ch:to_ln.text.length}, {className: 'mergely ch d lhs'}]);
}
}
else if (change['op'] == 'c') {
// apply LCS changes to each line
for (j = llf, k = rlf, p = 0;
((j >= 0) && (j <= llt)) || ((k >= 0) && (k <= rlt));
++j, ++k) {
var lhs_line, rhs_line;
if (k + p > rlt) {
// lhs continues past rhs, mark lhs as deleted
lhs_line = led.getLine( j );
marktext.push([led, {line:j, ch:0}, {line:j, ch:lhs_line.length}, {className: 'mergely ch d lhs'}]);
continue;
}
if (j + p > llt) {
// rhs continues past lhs, mark rhs as added
rhs_line = red.getLine( k );
marktext.push([red, {line:k, ch:0}, {line:k, ch:rhs_line.length}, {className: 'mergely ch a rhs'}]);
continue;
}
lhs_line = led.getLine( j );
rhs_line = red.getLine( k );
var lcs = new Mgly.LCS(lhs_line, rhs_line);
lcs.diff(
function added (from, to) {
marktext.push([red, {line:k, ch:from}, {line:k, ch:to}, {className: 'mergely ch a rhs'}]);
},
function removed (from, to) {
marktext.push([led, {line:j, ch:from}, {line:j, ch:to}, {className: 'mergely ch d lhs'}]);
}
);
}
}
}
this.trace('change', 'LCS marktext time', timer.stop());
// mark changes outside closure
led.operation(function() {
// apply lhs markup
for (var i = 0; i < marktext.length; ++i) {
var m = marktext[i];
if (m[0].doc.id != led.getDoc().id) continue;
self.chfns[self.id + '-lhs'].push(m[0].markText(m[1], m[2], m[3]));
}
});
red.operation(function() {
// apply rhs markup
for (var i = 0; i < marktext.length; ++i) {
var m = marktext[i];
if (m[0].doc.id != red.getDoc().id) continue;
self.chfns[self.id + '-rhs'].push(m[0].markText(m[1], m[2], m[3]));
}
});
this.trace('change', 'LCS markup time', timer.stop());
// merge buttons
var ed = {lhs:led, rhs:red};
jQuery('.merge-button').on('click', function(ev){
// determine which side the clicked merge button belongs to
var side = 'rhs';
var oside = 'lhs';
var parent = jQuery(this).parents('#' + self.id + '-editor-lhs');
if (parent.length) {
side = 'lhs';
oside = 'rhs';
}
var pos = ed[side].coordsChar({left:ev.pageX, top:ev.pageY});
// get the change id
var cid = null;
var info = ed[side].lineInfo(pos.line);
jQuery.each(info.bgClass.split(' '), function(i, clazz) {
if (clazz.indexOf('cid-') == 0) {
cid = parseInt(clazz.split('-')[1], 10);
return false;
}
});
var change = self.changes[cid];
self._merge_change(change, side, oside);
return false;
});
this.trace('change', 'markup buttons time', timer.stop());
},
_merge_change : function(change, side, oside) {
if (!change) return;
var led = this.editor[this.id+'-lhs'];
var red = this.editor[this.id+'-rhs'];
var ed = {lhs:led, rhs:red};
var i, from, to;
var text = ed[side].getRange(
CodeMirror.Pos(change[side + '-line-from'], 0),
CodeMirror.Pos(change[side + '-line-to'] + 1, 0));
if (change['op'] == 'c') {
ed[oside].replaceRange(text,
CodeMirror.Pos(change[oside + '-line-from'], 0),
CodeMirror.Pos(change[oside + '-line-to'] + 1, 0));
}
else if (side == 'rhs') {
if (change['op'] == 'a') {
ed[oside].replaceRange(text,
CodeMirror.Pos(change[oside + '-line-from'] + 1, 0),
CodeMirror.Pos(change[oside + '-line-to'] + 1, 0));
}
else {// 'd'
from = parseInt(change[oside + '-line-from'], 10);
to = parseInt(change[oside + '-line-to'], 10);
for (i = to; i >= from; --i) {
ed[oside].setCursor({line: i, ch: -1});
ed[oside].execCommand('deleteLine');
}
}
}
else if (side == 'lhs') {
if (change['op'] == 'a') {
from = parseInt(change[oside + '-line-from'], 10);
to = parseInt(change[oside + '-line-to'], 10);
for (i = to; i >= from; --i) {
//ed[oside].removeLine(i);
ed[oside].setCursor({line: i, ch: -1});
ed[oside].execCommand('deleteLine');
}
}
else {// 'd'
ed[oside].replaceRange( text,
CodeMirror.Pos(change[oside + '-line-from'] + 1, 0));
}
}
//reset
ed['lhs'].setValue(ed['lhs'].getValue());
ed['rhs'].setValue(ed['rhs'].getValue());
this._scroll_to_change(change);
},
_draw_info: function(editor_name1, editor_name2) {
var visible_page_height = jQuery(this.editor[editor_name1].getScrollerElement()).height();
var gutter_height = jQuery(this.editor[editor_name1].getScrollerElement()).children(':first-child').height();
var dcanvas = document.getElementById(editor_name1 + '-' + editor_name2 + '-canvas');
if (dcanvas == undefined) throw 'Failed to find: ' + editor_name1 + '-' + editor_name2 + '-canvas';
var clhs = jQuery('#' + this.id + '-lhs-margin');
var crhs = jQuery('#' + this.id + '-rhs-margin');
return {
visible_page_height: visible_page_height,
gutter_height: gutter_height,
visible_page_ratio: (visible_page_height / gutter_height),
margin_ratio: (visible_page_height / gutter_height),
lhs_scroller: jQuery(this.editor[editor_name1].getScrollerElement()),
rhs_scroller: jQuery(this.editor[editor_name2].getScrollerElement()),
lhs_lines: this.editor[editor_name1].lineCount(),
rhs_lines: this.editor[editor_name2].lineCount(),
dcanvas: dcanvas,
clhs: clhs,
crhs: crhs,
lhs_xyoffset: jQuery(clhs).offset(),
rhs_xyoffset: jQuery(crhs).offset()
};
},
_draw_diff: function(editor_name1, editor_name2, changes) {
var ex = this._draw_info(editor_name1, editor_name2);
var mcanvas_lhs = ex.clhs.get(0);
var mcanvas_rhs = ex.crhs.get(0);
var ctx = ex.dcanvas.getContext('2d');
var ctx_lhs = mcanvas_lhs.getContext('2d');
var ctx_rhs = mcanvas_rhs.getContext('2d');
this.trace('draw', 'visible_page_height', ex.visible_page_height);
this.trace('draw', 'gutter_height', ex.gutter_height);
this.trace('draw', 'visible_page_ratio', ex.visible_page_ratio);
this.trace('draw', 'lhs-scroller-top', ex.lhs_scroller.scrollTop());
this.trace('draw', 'rhs-scroller-top', ex.rhs_scroller.scrollTop());
jQuery.each(jQuery.find('#' + this.id + ' canvas'), function () {
jQuery(this).get(0).height = ex.visible_page_height;
});
ex.clhs.unbind('click');
ex.crhs.unbind('click');
ctx_lhs.beginPath();
ctx_lhs.fillStyle = this.settings.bgcolor;
ctx_lhs.strokeStyle = '#888';
ctx_lhs.fillRect(0, 0, 6.5, ex.visible_page_height);
ctx_lhs.strokeRect(0, 0, 6.5, ex.visible_page_height);
ctx_rhs.beginPath();
ctx_rhs.fillStyle = this.settings.bgcolor;
ctx_rhs.strokeStyle = '#888';
ctx_rhs.fillRect(0, 0, 6.5, ex.visible_page_height);
ctx_rhs.strokeRect(0, 0, 6.5, ex.visible_page_height);
var vp = this._get_viewport(editor_name1, editor_name2);
for (var i = 0; i < changes.length; ++i) {
var change = changes[i];
this.trace('draw', change);
// margin indicators
var lhs_y_start = ((change['lhs-y-start'] + ex.lhs_scroller.scrollTop()) * ex.visible_page_ratio);
var lhs_y_end = ((change['lhs-y-end'] + ex.lhs_scroller.scrollTop()) * ex.visible_page_ratio) + 1;
var rhs_y_start = ((change['rhs-y-start'] + ex.rhs_scroller.scrollTop()) * ex.visible_page_ratio);
var rhs_y_end = ((change['rhs-y-end'] + ex.rhs_scroller.scrollTop()) * ex.visible_page_ratio) + 1;
this.trace('draw', 'marker calculated', lhs_y_start, lhs_y_end, rhs_y_start, rhs_y_end);
ctx_lhs.beginPath();
ctx_lhs.fillStyle = this.settings.fgcolor[(this._current_diff==i?'c':'')+change['op']];
ctx_lhs.strokeStyle = '#000';
ctx_lhs.lineWidth = 0.5;
ctx_lhs.fillRect(1.5, lhs_y_start, 4.5, Math.max(lhs_y_end - lhs_y_start, 5));
ctx_lhs.strokeRect(1.5, lhs_y_start, 4.5, Math.max(lhs_y_end - lhs_y_start, 5));
ctx_rhs.beginPath();
ctx_rhs.fillStyle = this.settings.fgcolor[(this._current_diff==i?'c':'')+change['op']];
ctx_rhs.strokeStyle = '#000';
ctx_rhs.lineWidth = 0.5;
ctx_rhs.fillRect(1.5, rhs_y_start, 4.5, Math.max(rhs_y_end - rhs_y_start, 5));
ctx_rhs.strokeRect(1.5, rhs_y_start, 4.5, Math.max(rhs_y_end - rhs_y_start, 5));
if (!this._is_change_in_view(vp, change)) {
continue;
}
lhs_y_start = change['lhs-y-start'];
lhs_y_end = change['lhs-y-end'];
rhs_y_start = change['rhs-y-start'];
rhs_y_end = change['rhs-y-end'];
var radius = 3;
// draw left box
ctx.beginPath();
ctx.strokeStyle = this.settings.fgcolor[(this._current_diff==i?'c':'')+change['op']];
ctx.lineWidth = (this._current_diff==i) ? 1.5 : 1;
var rectWidth = this.draw_lhs_width;
var rectHeight = lhs_y_end - lhs_y_start - 1;
var rectX = this.draw_lhs_min;
var rectY = lhs_y_start;
// top edge and top-right corner
ctx.moveTo(rectX, rectY);
if (navigator.appName == 'Microsoft Internet Explorer') {
// IE arcs look awful
ctx.lineTo(this.draw_lhs_min + this.draw_lhs_width, lhs_y_start);
ctx.lineTo(this.draw_lhs_min + this.draw_lhs_width, lhs_y_end + 1);
ctx.lineTo(this.draw_lhs_min, lhs_y_end + 1);
}
else {
if (rectHeight <= 0) {
ctx.lineTo(rectX + rectWidth, rectY);
}
else {
ctx.arcTo(rectX + rectWidth, rectY, rectX + rectWidth, rectY + radius, radius);
ctx.arcTo(rectX + rectWidth, rectY + rectHeight, rectX + rectWidth - radius, rectY + rectHeight, radius);
}
// bottom line
ctx.lineTo(rectX, rectY + rectHeight);
}
ctx.stroke();
rectWidth = this.draw_rhs_width;
rectHeight = rhs_y_end - rhs_y_start - 1;
rectX = this.draw_rhs_max;
rectY = rhs_y_start;
// draw right box
ctx.moveTo(rectX, rectY);
if (navigator.appName == 'Microsoft Internet Explorer') {
ctx.lineTo(this.draw_rhs_max - this.draw_rhs_width, rhs_y_start);
ctx.lineTo(this.draw_rhs_max - this.draw_rhs_width, rhs_y_end + 1);
ctx.lineTo(this.draw_rhs_max, rhs_y_end + 1);
}
else {
if (rectHeight <= 0) {
ctx.lineTo(rectX - rectWidth, rectY);
}
else {
ctx.arcTo(rectX - rectWidth, rectY, rectX - rectWidth, rectY + radius, radius);
ctx.arcTo(rectX - rectWidth, rectY + rectHeight, rectX - radius, rectY + rectHeight, radius);
}
ctx.lineTo(rectX, rectY + rectHeight);
}
ctx.stroke();
// connect boxes
var cx = this.draw_lhs_min + this.draw_lhs_width;
var cy = lhs_y_start + (lhs_y_end + 1 - lhs_y_start) / 2.0;
var dx = this.draw_rhs_max - this.draw_rhs_width;
var dy = rhs_y_start + (rhs_y_end + 1 - rhs_y_start) / 2.0;
ctx.moveTo(cx, cy);
if (cy == dy) {
ctx.lineTo(dx, dy);
}
else {
// fancy!
ctx.bezierCurveTo(
cx + 12, cy - 3, // control-1 X,Y
dx - 12, dy - 3, // control-2 X,Y
dx, dy);
}
ctx.stroke();
}
// visible window feedback
ctx_lhs.fillStyle = this.settings.vpcolor;
ctx_rhs.fillStyle = this.settings.vpcolor;
var lto = ex.clhs.height() * ex.visible_page_ratio;
var lfrom = (ex.lhs_scroller.scrollTop() / ex.gutter_height) * ex.clhs.height();
var rto = ex.crhs.height() * ex.visible_page_ratio;
var rfrom = (ex.rhs_scroller.scrollTop() / ex.gutter_height) * ex.crhs.height();
this.trace('draw', 'cls.height', ex.clhs.height());
this.trace('draw', 'lhs_scroller.scrollTop()', ex.lhs_scroller.scrollTop());
this.trace('draw', 'gutter_height', ex.gutter_height);
this.trace('draw', 'visible_page_ratio', ex.visible_page_ratio);
this.trace('draw', 'lhs from', lfrom, 'lhs to', lto);
this.trace('draw', 'rhs from', rfrom, 'rhs to', rto);
ctx_lhs.fillRect(1.5, lfrom, 4.5, lto);
ctx_rhs.fillRect(1.5, rfrom, 4.5, rto);
ex.clhs.click(function (ev) {
var y = ev.pageY - ex.lhs_xyoffset.top - (lto / 2);
var sto = Math.max(0, (y / mcanvas_lhs.height) * ex.lhs_scroller.get(0).scrollHeight);
ex.lhs_scroller.scrollTop(sto);
});
ex.crhs.click(function (ev) {
var y = ev.pageY - ex.rhs_xyoffset.top - (rto / 2);
var sto = Math.max(0, (y / mcanvas_rhs.height) * ex.rhs_scroller.get(0).scrollHeight);
ex.rhs_scroller.scrollTop(sto);
});
},
trace: function(name) {
if(this.settings._debug.indexOf(name) >= 0) {
arguments[0] = name + ':';
console.log([].slice.apply(arguments));
}
}
});
jQuery.pluginMaker = function(plugin) {
// add the plugin function as a jQuery plugin
jQuery.fn[plugin.prototype.name] = function(options) {
// get the arguments
var args = jQuery.makeArray(arguments),
after = args.slice(1);
var rc;
this.each(function() {
// see if we have an instance
var instance = jQuery.data(this, plugin.prototype.name);
if (instance) {
// call a method on the instance
if (typeof options == "string") {
rc = instance[options].apply(instance, after);
} else if (instance.update) {
// call update on the instance
return instance.update.apply(instance, args);
}
} else {
// create the plugin
var _plugin = new plugin(this, options);
}
});
if (rc != undefined) return rc;
};
};
// make the mergely widget
jQuery.pluginMaker(Mgly.mergely);
})( window, document, jQuery, CodeMirror ); | PypiClean |
/CAMELS_library-0.3.tar.gz/CAMELS_library-0.3/scripts/autoencoder/main.py | import numpy as np
import sys,os
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision.utils as vutils
import data
import architecture
import matplotlib.pyplot as plt
import utils
################################### INPUT ############################################
# images parameters
f_images = '/mnt/ceph/users/camels/Results/GAN/Images_T.npy'
seed = 5 #to split images between training, validation and testing sets
grid = 64
examples = 16 # when saving images for different epochs
# architecture parameters
lr = 1e-4
hidden = 32
wd = 1e-11
epochs = 100000
batch_size = 128
BN_dim = 500
# output files
root_out = '/mnt/ceph/users/camels/Results/autoencoder'
f_loss = '%s/losses/loss_model_a_32_wd=1e-11_noise_10_500dim.txt'%root_out
f_model = '%s/models/model_a_32_wd=1e-11_noise_10_500dim.pt'%root_out
######################################################################################
# use GPUs if available
GPU = torch.cuda.is_available()
device = torch.device("cuda" if GPU else "cpu")
print('GPU:',GPU)
print('Training on',device)
cudnn.benchmark = True #May train faster but cost more memory
# define loss function
criterion = nn.MSELoss()
#criterion = nn.L1Loss()
# get the data
train_loader = data.create_dataset('train', seed, f_images, grid, batch_size,
verbose=True)
valid_loader = data.create_dataset('valid', seed, f_images, grid, batch_size,
verbose=True)
# define the model
#model = architecture.autoencoder_64a(BN_dim, hidden).to(device)
model = architecture.autoencoder_64h(BN_dim, hidden).to(device)
#model = architecture.autoencoder_64d(BN_dim, hidden).to(device)
#model = architecture.autoencoder_64e(BN_dim, hidden).to(device)
#model = architecture.autoencoder_64f(BN_dim, hidden).to(device)
#model = architecture.autoencoder_64g(BN_dim, hidden).to(device)
#model = architecture.autoencoder_64c(BN_dim, hidden).to(device)
#model = architecture.autoencoder_64b(BN_dim, hidden).to(device)
# load the best model, if it exists
if os.path.exists(f_model): model.load_state_dict(torch.load(f_model))
else: model.apply(architecture.weights_init)
# define the optimizer
optimizer = optim.Adam(model.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=wd)
# get the tensor for the images
for valid_maps,idxs in valid_loader:
test_maps = (valid_maps[:examples]).to(device)
break
# do a loop over all the epochs
min_loss = 1e8
for epoch in range(epochs):
# training
train_loss, num_maps = 0.0, 0
model.train()
for train_maps,idxs in train_loader:
train_maps = train_maps.to(device)
input_maps = train_maps + torch.randn_like(train_maps, device=device)*0.1
#recon_maps = model(train_maps)
recon_maps = model(input_maps)
#train_maps_fft = torch.rfft(train_maps, 2)
#recon_maps_fft = torch.rfft(recon_maps, 2)
#loss = ((train_maps_fft-recon_maps_fft)**2).mean()
loss = criterion(recon_maps, train_maps)
train_loss += (loss.cpu().item())*train_maps.shape[0]
num_maps += train_maps.shape[0]
optimizer.zero_grad()
loss.backward()
#if epoch%25==0:
# utils.save_gradients('%s/gradients/gradients_g_%d.txt'%(root_out,epoch), model)
optimizer.step()
train_loss = train_loss/num_maps
# validation
valid_loss, num_maps = 0.0, 0
model.eval()
for valid_maps,idxs in valid_loader:
with torch.no_grad():
valid_maps = valid_maps.to(device)
recon_maps = model(valid_maps)
loss = criterion(recon_maps, valid_maps)
valid_loss += (loss.cpu().item())*valid_maps.shape[0]
num_maps += valid_maps.shape[0]
valid_loss = valid_loss/num_maps
# verbose
if valid_loss<min_loss:
min_loss = valid_loss
torch.save(model.state_dict(), f_model)
print('Epoch %d: %.3e %.3e (saving)'%(epoch, train_loss, valid_loss))
else:
print('Epoch %d: %.3e %.3e'%(epoch, train_loss, valid_loss))
# save losses
f = open(f_loss, 'a')
f.write('%d %.3e %.3e\n'%(epoch, train_loss, valid_loss))
f.close()
"""
# plot some images & save models every 25 epochs
if epoch%25==0:
model.eval()
with torch.no_grad():
recon_maps = model(test_maps)
vutils.save_image(torch.cat([test_maps,recon_maps]),
'%s/images/images_g_%d.png'%(root_out,epoch),\
normalize=True, nrow=examples, range=(-1.0, 1.0))
""" | PypiClean |
/Guerilla-1.0.2.tar.gz/Guerilla-1.0.2/guerilla/play/search.py | from abc import ABCMeta, abstractmethod
import chess
import math
import time
import numpy as np
from collections import namedtuple
from search_helpers import quickselect, material_balance
import guerilla.data_handler as dh
# Note:
# Moves are stored as UCI strings
# Leaf FEN is stripped
Transposition = namedtuple("Transposition", "best_move value leaf_fen node_type")
PV_NODE = 0
CUT_NODE = 1
ALL_NODE = 2
LEAF_NODE = 3 # Additional type; used to mark nodes that were leaves, to reduce the number of evaluations
class TranspositionTable:
def __init__(self, exact_depth=True):
self.table = {} # Main transposition table {FEN: Transposition Entry}
self.exact_depth = exact_depth # If we require an exact depth match
self.cache_hits = {} # Cache hits by depth
self.cache_miss = 0
self.num_transpositions = 0
def __str__(self):
return "[TT] {} entries | {} transpositions".format(len(self.table), self.num_transpositions)
def fetch(self, fen, requested_depth):
"""
Fetches the transposition for the input FEN
:param fen: [String] Input FEN for which depth is queried.
:param requested_depth: [Int] Requested depth. Effect depends on self.exact_depth:
(True): Returns a transposition for which the input FEN was searched to EXACTLY requested_depth.
(False): Return a transposition for which the input FEN was search to AT LEAST requested_depth.
:return:
"""
# Returns {Best Move, Value, Type, Depth}
# Check for entry in table
white_fen = dh.flip_to_white(fen)
# print "Fetching ORIGINAL {} WHITE {}".format(fen, white_fen)
entry = self._get_entry(white_fen)
if not entry or entry.deepest < requested_depth:
self.cache_miss += 1
return
# If not using exact depth, then get deepest depth
if not self.exact_depth:
requested_depth = entry.deepest
# Fetch depth result from table
if requested_depth in entry.value_dict:
if requested_depth not in self.cache_hits:
self.cache_hits[requested_depth] = 0
self.cache_hits[requested_depth] += 1
transpo = entry.value_dict[requested_depth]
# If black is next then flip move AND leaf fen
if dh.black_is_next(fen):
transpo = self._flip_transposition(transpo)
return transpo
else:
self.cache_miss += 1
def update(self, fen, search_depth, best_move, score, leaf_fen, node_type):
# Flip to white
transpo = Transposition(best_move, score, leaf_fen, node_type)
# Flip transposition if necessary
if dh.black_is_next(fen):
transpo = self._flip_transposition(transpo)
entry = self._get_entry(dh.flip_to_white(fen), create=True)
if search_depth not in entry.value_dict:
self.num_transpositions += 1
entry.add_depth(search_depth, transpo)
def exists(self, fen):
assert (dh.white_is_next(fen))
return dh.strip_fen(fen) in self.table
def _get_entry(self, fen, create=False):
assert (dh.white_is_next(fen))
key = dh.strip_fen(fen)
if key not in self.table and create:
self.table[key] = TranpositionEntry()
return self.table.get(key)
def _flip_transposition(self, entry):
best_move = dh.flip_move(entry.best_move) if entry.best_move is not None else entry.best_move
leaf_fen = dh.flip_board(entry.leaf_fen)
return Transposition(best_move, entry.value, leaf_fen, entry.node_type)
def clear(self):
# Clears the transposition table
self.table.clear()
self.cache_hits = {}
self.cache_miss = 0
def get_value(self, fen):
"""
Returns the value for the input FEN, and the depth where that value comes from.
:param fen: [String] FEN.
:return:
[(Transposition, depth)] If FEN exists in cache else [(None, None)]
"""
result = (None, None)
# Check for entry entry
white_fen = dh.flip_to_white(fen)
entry = self._get_entry(white_fen)
# Get deepest transposition with an exact value
if entry is not None and entry.deepest_value is not None:
transpo = entry.value_dict[entry.deepest_value]
# Flip if necessary
if dh.black_is_next(fen):
transpo = self._flip_transposition(transpo)
result = (transpo, entry.deepest_value)
return result
class TranpositionEntry:
def __init__(self):
self.value_dict = {} # {Depth: Transposition}
self.deepest = None # Deepest transposition
self.deepest_value = None # Deepest transposition for which we have an exact value (Leaf or PV Node)
def add_depth(self, depth, transposition):
# Update value dict and deepest
self.value_dict[depth] = transposition
self.deepest = max(depth, self.deepest)
# Update deepest value
if transposition.node_type == PV_NODE or transposition.node_type == LEAF_NODE:
self.deepest_value = max(depth, self.deepest_value)
class Search:
__metaclass__ = ABCMeta
def __init__(self, evaluation_function):
"""
Initializes the Player abstract class.
Input:
evaluation_function [function]:
function to evaluate leaf nodes (usually the neural net)
"""
self.evaluation_function = evaluation_function
self.win_value = dh.WIN_VALUE
self.lose_value = dh.LOSE_VALUE
self.draw_value = dh.DRAW_VALUE
self.num_evals = 0
self.num_visits = [] # Index: depth, Value: Number of vists at that depth
self.depth_reached = 0
# Transposition table
self.tt = TranspositionTable(exact_depth=True) # TODO: We can change this
def evaluate(self, fen):
# Returns the value of the input FEN from the perspective of the next player to play
# Depth is used to update information
self.num_evals += 1
# Evaluate
board = chess.Board(fen)
if board.is_checkmate():
return self.lose_value
elif board.can_claim_draw() or board.is_stalemate():
return self.draw_value
else:
return self.evaluation_function(dh.flip_to_white(fen))
@abstractmethod
def __str__(self):
raise NotImplementedError("You should never see this")
@abstractmethod
def run(self, board, time_limit=None, clear_cache=False):
"""
Runs search based on parameter.
Inputs:
board [chess.Board]:
current state of board
Outputs:
best_move [chess.Move]:
Best move to play
best_value [float]:
value achieved by best move
best_leaf [String]
FEN of the board of the leaf node which yielded the highest value.
"""
raise NotImplementedError("You should never see this")
def reset(self):
# Clears the transposition table
self.tt.clear()
# Reset some logging variables
self.num_evals = 0
self.num_visits = []
self.depth_reached = 0
class IterativeDeepening(Search):
"""
Searches the game tree with iterative-deepening depth-first search.
At each depth, optionally prunes the remaining possibilities.
Implements alpha-beta pruning by default.
"""
def __init__(self, evaluation_function, time_limit=10,
max_depth=None, h_prune=False, prune_perc=0.0,
ab_prune=True, verbose=True, use_partial_search=False):
"""
Constructor
Inputs:
evaluation_function[function]:
function used to evaluate leaf nodes
time_limit[float]:
time limit per move
max_depth[int]:
If not None, limit depth to max_depth
h_prune[bool]:
Heuristic_prune. If true, prune between between depth-limited-searches
prune_perc[float range([0,1])]:
Percent of nodes to prune for heuristic prune
ab_prune[bool]:
Alpha-beta pruning. Same results on or off, only set to off for td training
use_partial_search [Bool]:
Whether or not to use partial search results (i.e. when a timeout occurs during DFS).
"""
super(IterativeDeepening, self).__init__(evaluation_function)
self.time_limit = time_limit
self.buff_time = time_limit * 0.02
self.depth_limit = 1 # Depth limit for DFS
self.max_depth = max_depth
self.order_moves = True # Whether moves should be ordered
self.h_prune = h_prune
self.prune_perc = prune_perc
self.ab_prune = ab_prune
self.root = None # holds root node
self.is_partial_search = False # Marks if the last DFS call was a partial search
self.use_partial_search = use_partial_search
# Holds the Killer Moves by depth. Each Entry is (set of moves, sorted array of (value, moves)).
self.killer_table = None
self.num_killer = 2 # Number of killer moves store for each depth
self._reset_killer()
# Move value for ordering when board not found in transposition table
self.order_fn_fast = material_balance
def __str__(self):
return "IterativeDeepening"
def _uci_to_move(self, uci):
return chess.Move.from_uci(uci) if uci is not None else uci
def DLS(self, node, alpha, beta):
"""
Recursive depth limited negamax search with alpha_beta pruning.
Source: https://en.wikipedia.org/wiki/Negamax#Negamax_with_alpha_beta_pruning_and_transposition_tables
:param node: [SearchNode] Roote node for search.
:param alpha: [Float] Lower bound.
:param beta: [Float] Upper bound.
:return:
best_score [float]:
Score achieved by best move
best_move [chess.Move]:
Best move to play
best_leaf [String]
FEN of the board of the leaf node which yielded the highest value.
"""
# Track some info
if not node.visited:
# We've never seen this node before -> Track some info
try:
self.num_visits[node.depth] += 1
except IndexError:
self.num_visits.append(1)
self.depth_reached = max(self.depth_reached, node.depth)
# Check if a new killer table entry should be created
if node.depth >= len(self.killer_table):
self.killer_table.append({'moves': set(), 'values': list()})
node.visited = True
alpha_original = alpha
# Check transposition table
result = self.tt.fetch(node.fen, requested_depth=self.depth_limit - node.depth)
if result:
if result.node_type == PV_NODE:
return result.value, self._uci_to_move(result.best_move), result.leaf_fen
elif result.node_type == CUT_NODE:
# lower bound
alpha = max(alpha, result.value)
elif result.node_type == ALL_NODE:
# upper bound
beta = min(beta, result.value)
if alpha >= beta:
return result.value, self._uci_to_move(result.best_move), result.leaf_fen
# Check children
if not node.children:
node.gen_children()
# Check if limit reached
self.time_left = (time.time() - self.start_time) <= self.time_limit - self.buff_time
if node.depth == self.depth_limit or not node.expand or not self.time_left or not node.children:
# Evaluate IF: depth limit reached, pruned, no time left OR no children
if not self.time_left:
self.is_partial_search = True
# Check if we have previously evaluated this node as a leaf node
if result and result.node_type == LEAF_NODE:
return result.value, None, node.fen
# Otherwise evaluate node
node.value = self.evaluate(node.fen)
# Update transposition table
self.tt.update(node.fen, 0, None, node.value, node.fen, LEAF_NODE)
return node.value, None, node.fen
# Get Ordered children
moves = self.get_ordered_moves(node) if self.order_moves else node.child_moves
# Find best move (recursive)
best_value = float("-inf")
best_move = None
leaf_fen = None
for move in moves:
# Get best score for opposing player and flip it to your perspective
value, _, lf = self.DLS(node.children[move], alpha=-beta, beta=-alpha)
value = -value
if value > best_value:
best_move = move
best_value = value
leaf_fen = lf
# Check for pruning
alpha = max(alpha, value)
if alpha >= beta:
self.update_killer(move, value, node.depth)
break
# Update transposition table
if best_value <= alpha_original:
# ALL NODES searched, no good moves found -> value is an upper bound
node_type = ALL_NODE
elif best_value >= beta:
# CUT NODE, pruning occurred -> value is a lower bound
node_type = CUT_NODE
else:
# PV NODE Otherwise its potentially part of the the principal variation
node_type = PV_NODE
# Update transposition table
self.tt.update(node.fen, self.depth_limit - node.depth, best_move, best_value, leaf_fen, node_type)
# Return result of search
return best_value, self._uci_to_move(best_move), leaf_fen
def get_ordered_moves(self, node):
"""
Orders the child moves of the node.
Ordering is based on:
(1) Killer moves
(2) Moves for which we have a value, ordering by (-depth, value) in increasing order
(3) Other moves
:param node: [SearchNode] Node who's child moves we need to order.
:return: [List of Strings] Ordered moves
"""
killer_moves = [] # Children that are "killer" moves
value_moves = [] # Moves with values
other_moves = []
for move in node.child_moves:
child_fen = node.children[move].fen
# Favor killer moves
if self.is_killer(move, node.depth):
killer_moves.append((move, self.order_fn_fast(child_fen)))
continue
# Check if we have an estimate for the move value
# Assign it to a group accordingly
transpo, depth = self.tt.get_value(child_fen)
if transpo:
# Note: take the negative of depth since we want to look at moves scored deeper first
value_moves.append((move, (-depth, transpo.value)))
else:
other_moves.append((move, self.order_fn_fast(child_fen)))
# Order in ascending order (want to check boards which are BAD for opponent first)
killer_moves.sort(key=lambda x: x[1])
value_moves.sort(key=lambda x: x[1])
other_moves.sort(key=lambda x: x[1])
moves = killer_moves + value_moves + other_moves
assert(len(moves) == len(node.child_moves))
move_order = [x[0] for x in moves]
return move_order
def update_killer(self, killer_move, value, depth):
"""
Updates the killer move table.
Input:
killer_move [Chess.Move]
The move which caused the A-B pruning to trigger.
value [Float]
The value yielded by the killer move.
depth [Int]
The depth FROM which the move was played.
"""
k_tab = self.killer_table[depth]
# Skip if already in killer table
if killer_move in k_tab['moves']:
return
# Check if table is full
if len(k_tab['moves']) < self.num_killer:
# Not Full
self._add_killer_move(depth, value, killer_move)
else:
# Full
# Check if move is better than worst current move
if value > k_tab['values'][0]:
# Remove Worst
_, worst_move = k_tab['values'].pop(0)
k_tab['moves'].remove(worst_move)
# Add Item
self._add_killer_move(depth, value, killer_move)
def _add_killer_move(self, depth, value, killer_move):
"""
Adds killer move to the table.
"""
# Update moves
self.killer_table[depth]['moves'].add(killer_move)
# Update values
self.killer_table[depth]['values'].append((value, killer_move))
self.killer_table[depth]['values'].sort(key=lambda x: x[0]) # Sorting each time is OK since length is small.
def is_killer(self, move, depth):
"""
Checks if the current move is a killer move.
Input:
move [chess.Move]
Move to check.
Output:
output [Boolean]
True if it is a kill move, False if not.
"""
return move in self.killer_table[depth]['moves']
def _reset_killer(self):
"""
Resets the killer moves table.
:return:
"""
self.killer_table = [{'moves': set(), 'values': list()}]
def prune(self, node):
"""
Recursive pruning of nodes
"""
if not node.expand or not node.children:
return
children = list(node.get_child_nodes())
# k = number of nodes that I keep
k = max(min(len(children), 2),
int(math.ceil(len(children) * (1 - self.prune_perc))))
quickselect(children, 0, len(children) - 1, k - 1, key=lambda x: x.value)
for child in children[:k]:
self.prune(child)
child.expand = True
for child in children[k:]:
child.expand = False
def run(self, board, time_limit=None, reset=False):
"""
For the duration of the time limit and depth limit:
1. Depth Limited Search
2. If enabled: Prune nodes
3. Increase max depth
Inputs:
board[chess.Board]:
Chess board to search the best move for
time_limit[float]:
time limit for search. If None, defaults to time_limit set in init
reset [bool]:
Resets the search instance. Used for training.
Outputs:
best_value [float]:
value achieved by best move
best_move [chess.Move]:
Best move to play
best_leaf [String]
FEN of the board of the leaf node which yielded the highest value.
"""
if time_limit is None:
time_limit = self.time_limit
if reset:
self.reset()
self.num_evals = 0
self.eval_time = 0
self.num_visits = []
self._reset_killer()
# Start timing
if time_limit is not None:
self.time_limit = time_limit
self.start_time = time.time()
self.time_left = True
self.root = SearchNode(board.fen(), 0)
self.depth_limit = 1
value = best_move = leaf_board = None
while self.time_left and (self.max_depth is None or self.depth_limit <= self.max_depth):
# Run search
new_results = self.DLS(self.root, alpha=float("-inf"), beta=float("inf"))
if not self.is_partial_search or (self.is_partial_search and self.use_partial_search):
value, best_move, leaf_board = new_results
self.is_partial_search = False
# Prune if necessary
if self.h_prune:
self.prune(self.root)
# Increase depth
self.depth_limit += 1
return value, best_move, leaf_board
class SearchNode:
def __init__(self, fen, depth, value=None):
"""
Generic node used for searching in 'rank_prune'.
Input:
fen [String]
FEN of the board.
depth [Int]
Depth at which the node occurs.
value [Float] (Optional)
Value of the node.
Non-Input Class Attributes
children [Dict of SearchNode's]
Children of the current SearchNode, key is Move which yields that SearchNode.
"""
self.fen = fen
self.depth = depth
self.children = {}
self.child_moves = []
self.expand = True
self.visited = False
def _add_child(self, move, child):
assert (isinstance(child, SearchNode))
# Convert move to uci
uci_move = move.uci()
self.children[uci_move] = child
self.child_moves.append(uci_move)
def gen_children(self):
# Create children
board = chess.Board(self.fen)
for move in board.legal_moves:
board.push(move)
self._add_child(move, SearchNode(board.fen(), self.depth + 1))
board.pop()
def get_child_nodes(self):
"""
Returns a list of child nodes.
"""
return self.children.values()
def __str__(self):
return "Node[{}, {}, {} children]".format(self.fen, self.depth, len(self.children))
class Minimax(IterativeDeepening):
"""
Uses a recursive function to perform a simple minimax with
alpha-beta pruning.
"""
def __init__(self, leaf_eval, max_depth=2, ab_prune=True):
"""
Constructor
Inputs:
leaf_eval[function]:
function used to evaluate leaf nodes
max_depth[int]:
depth to go to
ab_prune[bool]:
Alpha-beta pruning. Same results on or off, only set to off for td training
"""
super(Minimax, self).__init__(leaf_eval, time_limit=np.inf, max_depth=max_depth,
prune_perc=0.0, ab_prune=ab_prune, use_partial_search=False)
# Set depth limit to max depth
self.depth_limit = max_depth
# Turn off propagation
self.use_prop = False
# No buffer time needed
self.buff_time = 0
def __str__(self):
return "Minimax"
def run(self, board, time_limit=None, reset=False):
""" Reset some variables, call recursive function """
if reset:
self.reset()
self.num_evals = 0
self.eval_time = 0
self.num_visits = []
self.time_left = True
# Run to depth limit
self.start_time = 0
self.root = SearchNode(board.fen(), 0) # Note: store unflipped fen
return self.DLS(self.root, alpha=float("-inf"), beta=float("inf")) | PypiClean |
/ComputMath-1.0.1.tar.gz/ComputMath-1.0.1/README.md | Computational_Mathematics
---
---
The module contains methods for solving various problems in
computational mathematics. It is aimed at 2nd-3rd year students
taking a "Computational Mathematics" course.

1. Numerical methods for solving nonlinear equations (finding the roots of an equation)
   - [Bisection (half-division) method](root/half_division.py)
   - [Simple iteration method](root/simple_iteration.py)
2. Numerical methods for solving differential equations.
   - [Euler method](differential/euler.py)
   - [Fourth-order Runge-Kutta method](differential/runge_kutt.py)
3. Numerical methods for computing definite integrals.
   - [Simpson's method](integral/simpson.py)
   - [Left rectangle method]()
---
How to use it: import the package by its name `ComputMath`,
define the function you need to evaluate, and pass
it to the numerical method you want:
```python
import math

import ComputMath

def function(x):
    """Function to evaluate"""
    return (2 * math.cos(x)) / 7

ComputMath.simple_iteration(function)
```
---
Enter the parameters:

Get the result:

| PypiClean |
/Colbert-0.30.tar.gz/Colbert-0.30/src/scripts/colbert_compte_de_resultat.py |
# Copyright (c) 2012 Stanislas Guerra <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import json
from colbert.compte_de_resultat import compte_de_resultat
from colbert.utils import json_encoder
from optparse import OptionParser
from pathlib import Path
def main():
usage = "usage: %prog [options] balance-des-comptes.json"
version = "%prog 0.1"
parser = OptionParser(usage=usage, version=version, description=__doc__)
parser.add_option("-l", "--label", dest="label", default="Compte de résultat",
help="Titre à faire apparaitre sur le compte de résultat")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Vous devez passer en argument le chemin d'un fichier "
"de balance des comptes au format JSON.")
else:
balance_des_comptes = json.loads(Path(args[0]).read_text())
b = compte_de_resultat(balance_des_comptes, options.label)
json.dump(b, sys.stdout, default=json_encoder, indent=4)
if __name__ == "__main__":
main() | PypiClean |
/Apppath-1.0.3.tar.gz/Apppath-1.0.3/README.md | <!---->
<p align="center">
<img src=".github/images/apppath.svg" alt='AppPath' />
</p>
<h1 align="center">AppPath</h1>
<!--# AppPath-->
| [](https://pything.github.io/apppath/) | [](https://travis-ci.com/pything/apppath) | [](https://github.com/ambv/black) | [](https://coveralls.io/github/pything/apppath?branch=master) | [](https://lgtm.com/projects/g/pything/apppath/alerts/) | [](https://lgtm.com/projects/g/pything/apppath/context:python) |
|-----------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Workflows |
|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|  |
|  |
|  |
___
> Clutter-free app data

___
A class and a set of functions providing paths that follow each operating system's conventions for where apps should store data, logs, caches, and so on.
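
Below is a minimal usage sketch. The constructor arguments and property names are assumptions based on the description above and on similar app-directory helpers, not a verified API — see the generated documentation (badge above) for the authoritative interface:

```python
from apppath import AppPath

# "MyApp" and "MyOrg" are illustrative placeholders.
app = AppPath("MyApp", "MyOrg")

# Typical per-platform directories such a helper exposes (property names assumed):
print(app.user_data)    # e.g. ~/.local/share/MyApp on Linux
print(app.user_config)  # e.g. ~/.config/MyApp
print(app.user_cache)   # e.g. ~/.cache/MyApp
print(app.user_log)     # e.g. a platform-specific log directory for MyApp
```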
| PypiClean |
/Djaloha-0.4.2.tar.gz/Djaloha-0.4.2/djaloha/static/aloha.0.20.20/plugins/common/block/lib/block.js | * Module which contains the base class for Blocks, and a Default/Debug block.
*
* @name block.block
* @namespace block/block
*/
define(['aloha', 'aloha/jquery', 'block/blockmanager', 'aloha/observable', 'aloha/floatingmenu'],
function(Aloha, jQuery, BlockManager, Observable, FloatingMenu) {
var GENTICS = window.GENTICS;
/**
* An aloha block has the following special properties, being readable through the
* "attr" function:
* - aloha-block-type -- TYPE of the AlohaBlock as registered by the BlockManager
*
* @name block.block.AbstractBlock
* @class An abstract block that must be used as a base class for custom blocks
*/
var AbstractBlock = Class.extend(Observable,
/** @lends block.block.AbstractBlock */
{
/**
* Event which is triggered if the block attributes change.
*
* @name block.block.AbstractBlock#change
* @event
*/
/**
* Title of the block, used to display the name in the sidebar editor.
*
* @type String
* @api
*/
title: null,
/**
* Id of the underlying $element, used to identify the block.
* @type String
*/
id: null,
/**
* The wrapping element of the block.
* @type jQuery
* @api
*/
$element: null,
/**
* if TRUE, the rendering is currently taking place. Used to prevent recursion
* errors.
* @type Boolean
*/
_currentlyRendering: false,
/**
* set to TRUE once the block is fully initialized.
*
* @type Boolean
*/
_initialized: false,
/**
* Set to TRUE if the last click activated a *nested editable*.
* If FALSE; the block itself is selected.
* This is needed when a block is deleted in IE7/8.
*/
_isInsideNestedEditable: false,
/**************************
* SECTION: Initialization and Lifecycle
**************************/
/**
* Initialize the basic block. Do not call directly; instead use jQuery(...).alohaBlock() to
* create new blocks.
*
* This function shall only be called through the BlockManager. See BlockManager::_blockify().
*
* @param {jQuery} $element Element that declares the block
* @constructor
*/
_constructor: function($element) {
var that = this;
this.$element = $element;
if ($element.attr('id')) {
this.id = $element.attr('id');
} else {
this.id = GENTICS.Utils.guid();
$element.attr('id', this.id);
}
$element.contentEditable(false);
$element.addClass('aloha-block');
if (this.isDraggable()) {
// Remove default drag/drop behavior of the browser
$element.find('img').attr('draggable', 'false');
$element.find('a').attr('draggable', 'false');
}
// While the event handler is defined here, it is connected to the DOM element inside "_connectThisBlockToDomElement"
this._onElementClickHandler = function(event) {
// We only activate ourselves if we are the innermost aloha-block.
// If we are not the innermost aloha-block, we get highlighted (but not activated) automatically
// by the innermost block.
if (jQuery(event.target).closest('.aloha-block').get(0) === that.$element.get(0)) {
that._fixScrollPositionBugsInIE();
that.activate(event.target, event);
}
};
// Register event handlers on the block
this._connectThisBlockToDomElement($element);
// This is executed when a block is selected through caret handling
// TODO!
//Aloha.bind('aloha-block-selected', function(event,obj) {
// if (that.$element.get(0) === obj) {
// that.activate();
// }
//});
this._initialized = true;
},
/**
* Is set inside the constructor to the event handler function
* which should be executed when the element is clicked.
*
* NOTE: Purely internal, "this" is not available inside this method!
*/
_onElementClickHandler: null,
/**
* We need to tell Aloha that we handle the event already;
* else a selection of a nested editable will *not* select
* the block.
*
* This callback is bound to the mousedown, focus and dblclick events.
*
* NOTE: Purely internal, "this" is not available inside this method!
*/
_preventSelectionChangedEventHandler: function() {
Aloha.Selection.preventSelectionChanged();
},
/**
* This method connects this block object to the passed DOM element.
* In detail, this method does the following:
*
* - if this.$element is already set, remove all block event handlers
* - sets this.$element = jQuery(newElement)
* - initialize event listeners on this.$element
* - call init()
*
* The method is called in two contexts: First, when a block is constructed
* to initialize the event listeners etc. Second, it is ALSO called when
* a block inside a nested block with editable in between is detected
* as inconsistent.
*/
_connectThisBlockToDomElement: function(newElement) {
var that = this;
var $newElement = jQuery(newElement);
if (this.$element) {
this.$element.unbind('click', this._onElementClickHandler);
this.$element.unbind('mousedown', this._preventSelectionChangedEventHandler);
this.$element.unbind('focus', this._preventSelectionChangedEventHandler);
this.$element.unbind('dblclick', this._preventSelectionChangedEventHandler);
}
this.$element = $newElement;
this.$element.bind('click', this._onElementClickHandler);
this.$element.bind('mousedown', this._preventSelectionChangedEventHandler);
this.$element.bind('focus', this._preventSelectionChangedEventHandler);
this.$element.bind('dblclick', this._preventSelectionChangedEventHandler);
this.init(this.$element, function() {
// WORKAROUND against loading order dependencies. If we have
// nested Blocks inside each other (with no editables in between)
// it could be that the *inner* block is initialized *before* the outer one.
//
// However, the inner block needs to know whether it shall render drag handles or not,
// and this depends on whether it is inside an editable or a block.
//
// In order to fix this case, we delay the drag-handle rendering (and all the other
// post-processing) to the next JavaScript Run Loop using a small timeout.
window.setTimeout(function() {
that._postProcessElementIfNeeded();
}, 5);
});
},
/**
* IE HACK: Our beloved Internet Explorer sometimes scrolls to the top
* of the page when activating an aloha block, and on numerous other occasions
* like when an <span> block is moved via drag/drop.
*
* We can detect this and scroll right back; although this will flicker
* a little (but still a lot better than before)
*/
_fixScrollPositionBugsInIE: function() {
var scrollPositionBefore = jQuery(window).scrollTop();
window.setTimeout(function() {
if (jQuery(window).scrollTop() !== scrollPositionBefore) {
jQuery(window).scrollTop(scrollPositionBefore);
}
}, 10);
},
/**
* Template method to initialize the block. Can be used to set attributes
* on the block, depending on the block contents. You will most probably
* use $element and this.attr() inside this function.
*
* !!! This method can be called *multiple times*, as it is called each time
* when $element has been disconnected from the DOM (which can happen because of various reasons)
* and the block needs to re-initialize. So make sure this method can be called *MULTIPLE TIMES*
* and always returns predictable results. This method must be idempotent, same as update().
*
* Furthermore, always when this method is finished, you need to call postProcessFn() afterwards.
* This function adds drag handles and other controls if necessary.
*
* @param {jQuery} $element a shortcut to the block's DOM element (this.$element) for easy processing
* @param {Function} postProcessFn this function MUST be called at all times the $element has been updated; as it adds drag/drop/delete/... handles if necessary
* @api
*/
init: function($element, postProcessFn) {
postProcessFn();
},
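        // Illustrative sketch (not part of the original source): a concrete block type
        // could override init() roughly like this. "MyBlock", the CSS class and the
        // rendered text are made-up example names.
        //
        //     var MyBlock = AbstractBlock.extend({
        //         init: function ($element, postProcessFn) {
        //             $element.addClass('my-block');
        //             $element.text(this.attr('title') || '');
        //             postProcessFn(); // must be called whenever $element has been updated
        //         }
        //     });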
/**
* Callback which is executed when somebody triggers destroy().
*
* This only allows destruction if the block is *inside* an aloha-editable and *not* inside an aloha-block.
*
     * @return {Boolean} true if destruction should happen, false otherwise
*/
shouldDestroy: function() {
var $closest = this.$element.parent().closest('.aloha-block,.aloha-editable,.aloha-block-collection');
if ($closest.hasClass('aloha-block-collection') && this.$element[0].tagName.toLowerCase() === 'div') {
return true;
} else {
return $closest.hasClass('aloha-editable');
}
},
/**
* Destroy this block instance completely. Removes the element from the DOM,
* unregisters it, and triggers a block-delete event on the BlockManager.
*
* @param {Boolean} force TRUE if you want to force deletion, despite shouldDestroy() returning false.
* @api
*/
destroy: function(force) {
if (!this.shouldDestroy() && force !== true) return;
var that = this;
var newRange = new GENTICS.Utils.RangeObject();
newRange.startContainer = newRange.endContainer = this.$element.parent()[0];
newRange.startOffset = newRange.endOffset = GENTICS.Utils.Dom.getIndexInParent(this.$element[0]);
BlockManager.trigger('block-delete', this);
BlockManager._unregisterBlock(this);
this.unbindAll();
var isInlineElement = this.$element[0].tagName.toLowerCase() === 'span';
this.$element.fadeOut('fast', function() {
that.$element.remove();
BlockManager.trigger('block-selection-change', []);
window.setTimeout(function() {
if (isInlineElement) {
newRange.select();
}
}, 5);
});
},
/**************************
* SECTION: Getters and Helpers
**************************/
/**
* Get the id of the block
* @returns {String}
*/
getId: function() {
return this.id;
},
/**
* Get a schema of attributes which shall be rendered / edited
* in the sidebar.
*
* @api
* @returns {Object}
*/
getSchema: function() {
return null;
},
/**
* Template Method which should return the block title. Needed for editing sidebar.
* By default, the block title is returned.
*
* @api
*/
getTitle: function() {
return this.title;
},
/**
* Returns true if the block is draggable because it is inside an aloha-editable, false otherwise.
*
* You cannot depend on this method's result during the *init* phase of the Aloha Block, as the
* outer block might not be initialized at that point yet. Thus, do not call this method inside init().
*
* @return Boolean
*/
isDraggable: function() {
if (this.$element[0].tagName.toLowerCase() === 'div' && this.$element.parents('.aloha-editable,.aloha-block,.aloha-block-collection').first().hasClass('aloha-block-collection')) {
// Here, we are inside an aloha-block-collection, and thus also need to be draggable.
return true;
}
return this.$element.parents('.aloha-editable,.aloha-block').first().hasClass('aloha-editable');
},
/**************************
* SECTION: Activation / Deactivation
**************************/
/**
* activates the block
* will select the block's contents, highlight it, update the floating menu and update the sidebar (if needed).
*
* When calling programmatically, do not set eventTarget or event arguments.
* @api
*/
activate: function(eventTarget, event) {
var highlightedBlocks = [];
// Deactivate currently highlighted blocks
jQuery.each(BlockManager._getHighlightedBlocks(), function() {
this.deactivate();
});
// Activate current block
if (this.$element.attr('data-block-skip-scope') !== 'true') {
FloatingMenu.setScope('Aloha.Block.' + this.attr('aloha-block-type'));
}
this.$element.addClass('aloha-block-active');
this._highlight();
highlightedBlocks.push(this);
// Highlight parent blocks
this.$element.parents('.aloha-block').each(function() {
var block = BlockManager.getBlock(this);
block._highlight();
highlightedBlocks.push(block);
});
            // Browsers do not remove the cursor, so we enforce it when an editable is clicked.
// However, when the user clicked inside a nested editable, we will not remove the cursor (as the user wants to start typing then)
// small HACK: we also do not deactivate if we are inside an aloha-table-cell-editable.
if (jQuery(eventTarget).closest('.aloha-editable,.aloha-block,.aloha-table-cell-editable').first().hasClass('aloha-block')) {
this._isInsideNestedEditable = false;
Aloha.getSelection().removeAllRanges();
} else {
this._isInsideNestedEditable = true;
if (event) {
// We now update the selection, as you clicked *inside* an editable inside the block
Aloha.Selection.updateSelection(event);
}
}
// Trigger selection change event
BlockManager.trigger('block-selection-change', highlightedBlocks);
},
/**
     * Deactivate the block
*/
deactivate: function() {
var that = this;
this._unhighlight();
this.$element.parents('.aloha-block').each(function() {
that._unhighlight();
});
this.$element.removeClass('aloha-block-active');
BlockManager.trigger('block-selection-change', []);
},
/**
* @returns {Boolean} True if this block is active
*/
isActive: function() {
return this.$element.hasClass('aloha-block-active');
},
/**
* Internal helper which sets a block as highlighted, because the block itself
* or a child block has been activated.
*/
_highlight: function() {
this.$element.addClass('aloha-block-highlighted');
BlockManager._setHighlighted(this);
},
/**
* Internal helper which sets a block as un-highlighted.
*/
_unhighlight: function() {
this.$element.removeClass('aloha-block-highlighted');
BlockManager._setUnhighlighted(this);
},
/**************************
* SECTION: Block Rendering
**************************/
/**
* Internal _update method, which needs to be called internally if a property
* changed. This is just a wrapper around update().
*/
_update: function() {
var that = this;
if (this._currentlyRendering) return;
if (!this._initialized) return;
this._currentlyRendering = true;
this.update(this.$element, function() {
that._postProcessElementIfNeeded();
});
this._currentlyRendering = false;
},
/**
* Template method to render contents of the block, must be implemented by specific block type.
     * $element can be augmented by additional DOM elements like drag/drop handles. If you do
     * any jQuery selection, you need to ignore all results which have an "aloha-block-handle" class
* set.
*
* Furthermore, always when you update $element, you need to call postProcessFn() afterwards.
* This function adds drag handles and other controls if necessary.
*
* This method should *only* be called from the internal _update method.
*
* @param {jQuery} $element a shortcut to the block's DOM element (this.$element) for easy processing
* @param {Function} postProcessFn this function MUST be called at all times the $element has been updated; as it adds drag/drop/delete/... handles if necessary
*
* @api
*/
update: function($element, postProcessFn) {
postProcessFn();
},
/**
     * Post processor, called to augment the Block Element's DOM with drag handles etc.
*
* This method must be idempotent. I.e. it must produce the same results
* when called once or twice.
*/
_postProcessElementIfNeeded: function() {
this.createEditablesIfNeeded();
this._checkThatNestedBlocksAreStillConsistent();
this._makeNestedBlockCollectionsSortable();
this.renderBlockHandlesIfNeeded();
if (this.isDraggable() && this.$element[0].tagName.toLowerCase() === 'span') {
this._setupDragDropForInlineElements();
this._disableUglyInternetExplorerDragHandles();
} else if (this.isDraggable() && this.$element[0].tagName.toLowerCase() === 'div') {
this._setupDragDropForBlockElements();
this._disableUglyInternetExplorerDragHandles();
}
},
/**
* Due to indeterminate initialization order of nested blocks,
     * it can happen that blockifying a parent block disconnects $element inside
* child blocks.
*
* This is the case we detect here; and if it happens, we reconnect the
* block to its currently visible DOM element.
*/
_checkThatNestedBlocksAreStillConsistent: function() {
this.$element.find('.aloha-block').each(function() {
var block = BlockManager.getBlock(this);
if (block && block.$element[0] !== this) {
block._connectThisBlockToDomElement(this);
}
});
},
/**
* If a nested element is marked as "aloha-block-collection",
* we want to make it sortable, by calling the appropriate Block Manager
* function.
*/
_makeNestedBlockCollectionsSortable: function() {
var that = this;
this.$element.find('.aloha-block-collection').each(function() {
var $blockCollection = jQuery(this);
if ($blockCollection.closest('.aloha-block').get(0) === that.$element.get(0)) {
// We are only responsible for one-level-down Block Collections, not
// for nested ones.
BlockManager.createBlockLevelSortableForEditableOrBlockCollection($blockCollection);
}
})
},
/**
* Helper which disables the ugly IE drag handles. They are still shown, but at
* least they do not work anymore
*/
_disableUglyInternetExplorerDragHandles: function() {
this.$element.get( 0 ).onresizestart = function ( e ) { return false; };
this.$element.get( 0 ).oncontrolselect = function ( e ) { return false; };
// We do NOT abort the "ondragstart" event as it is required for drag/drop.
this.$element.get( 0 ).onmovestart = function ( e ) { return false; };
this.$element.get( 0 ).onselectstart = function ( e ) { return false; };
},
/**************************
* SECTION: Drag&Drop for INLINE elements
**************************/
_setupDragDropForInlineElements: function() {
var that = this;
// Here, we store the character DOM element which has been hovered upon recently.
// This is needed as somehow, the "drop" event on the character is not fired.
// Furthermore, we use it to know whether we need to "revert" the draggable to the original state or not.
var lastHoveredCharacter = null;
// HACK for IE7: Internet Explorer 7 has a very weird behavior in
// not always firing the "drop" callback of the inner droppable... However,
// the "over" and "out" callbacks are fired correctly.
// Because of this, we handle the "drop" inside the "stop" callback in IE7
// instead of the "drop" callback (where it is handled in all other browsers)
// This $currentDraggable is also needed as part of the IE 7 hack.
// $currentDraggable contains a reference to the current draggable, but
// only makes sense to read when lastHoveredCharacter !== NULL.
var $currentDraggable = null;
// This dropFn is the callback which handles the actual moving of
// nodes. We created a separate function for it, as it is called inside the "stop" callback
// in IE7 and inside the "drop" callback in all other browsers.
var dropFn = function() {
if (lastHoveredCharacter) {
// the user recently hovered over a character
var $dropReferenceNode = jQuery(lastHoveredCharacter);
if ($dropReferenceNode.is('.aloha-block-dropInlineElementIntoEmptyBlock')) {
// the user wanted to drop INTO an empty block!
$dropReferenceNode.children().remove();
$dropReferenceNode.append($currentDraggable);
} else if ($dropReferenceNode.is('.aloha-block-droppable-right')) {
$dropReferenceNode.html($dropReferenceNode.html() + ' ');
// Move draggable after drop reference node
$dropReferenceNode.after($currentDraggable);
} else {
// Insert space in the beginning of the drop reference node
if ($dropReferenceNode.prev('[data-i]').length > 0) {
// If not the last element, insert space in front of next element (i.e. after the moved block)
$dropReferenceNode.prev('[data-i]').html($dropReferenceNode.prev('[data-i]').html() + ' ');
}
$dropReferenceNode.html(' ' + $dropReferenceNode.html());
// Move draggable before drop reference node
$dropReferenceNode.before($currentDraggable);
}
$currentDraggable.removeClass('ui-draggable').css({'left': 0, 'top': 0}); // Remove "draggable" options... somehow "Destroy" does not work
that._fixScrollPositionBugsInIE();
}
jQuery('.aloha-block-dropInlineElementIntoEmptyBlock').removeClass('aloha-block-dropInlineElementIntoEmptyBlock');
};
var editablesWhichNeedToBeCleaned = [];
this.$element.draggable({
handle: '.aloha-block-draghandle',
scope: 'aloha-block-inlinedragdrop',
revert: function() {
return (lastHoveredCharacter === null);
},
revertDuration: 250,
stop: function() {
if (Ext.isIE7) {
dropFn();
}
jQuery.each(editablesWhichNeedToBeCleaned, function() {
that._dd_traverseDomTreeAndRemoveSpans(this);
})
$currentDraggable = null;
editablesWhichNeedToBeCleaned = [];
},
start: function() {
editablesWhichNeedToBeCleaned = [];
                // In order to make Inline Blocks droppable into empty paragraphs, we insert a non-breaking space (&nbsp;) manually before the placeholder-br.
// -> for IE
jQuery('.aloha-editable').children('p:empty').html(' ');
// Make **ALL** editables on the page droppable, such that it is possible
// to drag/drop *across* editable boundaries
var droppableCfg = {
// make block elements droppable
tolerance: 'pointer',
addClasses: false, // performance optimization
scope: 'aloha-block-inlinedragdrop',
/**
                     * When hovering over a paragraph, we convert its contents into spans, to make
* them droppable.
*/
over: function(event, ui) {
if (editablesWhichNeedToBeCleaned.indexOf(this) === -1) {
editablesWhichNeedToBeCleaned.push(this);
}
$currentDraggable = ui.draggable;
if (jQuery(this).is(':empty') || jQuery(this).children('br.aloha-end-br').length > 0 || jQuery(this).html() === ' ') {
// the user tries to drop into an empty container, thus we highlight the container and do an early return
jQuery(this).addClass('aloha-block-dropInlineElementIntoEmptyBlock');
lastHoveredCharacter = this;
return;
}
that._dd_traverseDomTreeAndWrapCharactersWithSpans(this);
jQuery('span[data-i]', this).droppable({
tolerance: 'pointer',
addClasses: false,
scope: 'aloha-block-inlinedragdrop',
over: function() {
if (lastHoveredCharacter) {
// Just to be sure, we remove the css class of the last hovered character.
                                    // This is needed so that spans which span multiple
                                    // lines are deselected.
jQuery(lastHoveredCharacter).removeClass('aloha-block-droppable');
}
lastHoveredCharacter = this;
jQuery(this).addClass('aloha-block-droppable');
},
out: function() {
jQuery(this).removeClass('aloha-block-droppable');
if (lastHoveredCharacter === this) {
lastHoveredCharacter = null;
}
}
});
// Now that we updated the droppables in the system, we need to recalculate
// the Drag Drop offsets.
jQuery.ui.ddmanager.prepareOffsets(ui.draggable.data('draggable'), event);
},
out: function() {
jQuery(this).removeClass('aloha-block-dropInlineElementIntoEmptyBlock');
},
/**
* When dropping over a paragraph, we use the "lastHoveredCharacter"
* as drop target.
*/
drop: function() {
if (!Ext.isIE7) {
dropFn();
}
}
};
jQuery('.aloha-editable').children(':not(.aloha-block)').droppable(droppableCfg);
// Small HACK: Also make table cells droppable
jQuery('.aloha-table-cell-editable').droppable(droppableCfg);
}
});
},
/**
* Helper which traverses the DOM tree starting from el and wraps all non-empty texts with spans,
     * such that they can act as drop targets.
*
* @param {DomElement} el
*/
_dd_traverseDomTreeAndWrapCharactersWithSpans: function(el) {
var child;
for(var i=0, l=el.childNodes.length; i < l; i++) {
child = el.childNodes[i];
if (child.nodeType === 1) { // DOM Nodes
if (!~child.className.indexOf('aloha-block') && child.attributes['data-i'] === undefined) {
// We only recurse if child does NOT have the class "aloha-block", and is NOT data-i
this._dd_traverseDomTreeAndWrapCharactersWithSpans(child);
} else if (child.attributes['data-i']) {
// data-i set -> we have converted this hierarchy level already --> early return!
return;
}
} else if (child.nodeType === 3) { // Text Nodes
var numberOfSpansInserted = this._dd_insertSpans(child);
i += numberOfSpansInserted;
l += numberOfSpansInserted;
}
}
},
/**
     * Helper which splits text on word boundaries, attaching whitespace to the following text part.
* Examples:
* - "Hello world" -> ["Hello", " world"]
* - " Hello world" -> [" Hello", " world"]
* --> see the unit tests for the specification
*/
_dd_splitText: function(text) {
var textParts = text.split(/(?=\b)/);
var cleanedTextParts = [];
var isWhitespace = false;
for (var i=0,l=textParts.length; i<l; i++) {
if (!/[^\t\n\r ]/.test(textParts[i])) {
// if the current text part is just whitespace, we add a flag...
isWhitespace = true;
} else {
if (isWhitespace) {
// we have a whitespace to add
cleanedTextParts.push(' ' + textParts[i]);
isWhitespace = false;
} else {
cleanedTextParts.push(textParts[i]);
}
}
}
if (isWhitespace) {
cleanedTextParts[cleanedTextParts.length - 1] += ' ';
}
return cleanedTextParts;
},
/**
* This is a helper for _dd_traverseDomTreeAndWrapCharactersWithSpans,
* performing the actual conversion.
*
* This function returns the number of additional DOM elements inserted.
* This is "numberOfSpansCreated - 1" (because one text node has been initially there)
*/
_dd_insertSpans: function(el) {
var text = el.nodeValue;
// If node just contains empty strings, we do not do anything.
// Use ECMA-262 Edition 3 String and RegExp features
if (!/[^\t\n\r ]/.test(text)) {
return 0;
}
var newNodes = document.createDocumentFragment();
var splitText = this._dd_splitText(text);
var l = splitText.length;
var x, word, leftWordPartLength, t;
var numberOfSpansInserted = 0;
for (var i=0; i<l; i++) {
// left half of word
word = splitText[i];
if (word.length === 0) continue;
// We use "floor" here such that sentence delimiters like "!" can have a block placed afterwards
leftWordPartLength = Math.floor(word.length/2);
// For Internet Explorer, we only make dropping AFTER words possible to improve performance
if (Ext.isIE7 || Ext.isIE8) {
leftWordPartLength = 0;
}
if (leftWordPartLength > 0) {
x = document.createElement('span');
x.appendChild(document.createTextNode(word.substr(0, leftWordPartLength)));
x.setAttribute('data-i', i);
newNodes.appendChild(x);
numberOfSpansInserted++;
}
// right half of word
x = document.createElement('span');
t = word.substr(leftWordPartLength);
x.appendChild(document.createTextNode(t));
x.setAttribute('data-i', i);
x.setAttribute('class', 'aloha-block-droppable-right');
newNodes.appendChild(x);
numberOfSpansInserted++;
}
el.parentNode.replaceChild(newNodes, el);
return numberOfSpansInserted-1;
},
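        // Worked example (illustrative): for a text node containing "Hello",
        // _dd_splitText() yields ["Hello"] and leftWordPartLength is floor(5/2) = 2
        // (outside IE7/8), so the text node is replaced by
        //     <span data-i="0">He</span><span data-i="0" class="aloha-block-droppable-right">llo</span>
        // i.e. two spans replace the single text node and the function returns 1.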
/**
* After the Drag/Drop operation, we need to remove the SPAN elements
* again.
*/
_dd_traverseDomTreeAndRemoveSpans: function(el) {
var nodesToDelete = [], convertBack;
convertBack = function(el) {
var currentlyTraversingExpandedText = false, currentText, lastNode;
var child;
for(var i=0, l=el.childNodes.length; i < l; i++) {
child = el.childNodes[i];
if (child.nodeType === 1) { // Node
if (child.attributes['data-i'] !== undefined) {
if (!currentlyTraversingExpandedText) {
// We did not traverse expanded text before, and just entered an expanded text section
// thus, we reset all variables to their initial state
currentlyTraversingExpandedText = true;
currentText = '';
lastNode = undefined;
}
if (currentlyTraversingExpandedText) {
// We are currently traversing the expanded text nodes, so we collect their data
// together in the currentText variable. We know that they only
// have one TextNode child, as this is the way we constructed them.
//
// Note: we do NOT use child.innerHTML here, as this returns HTML entities;
// but we need the HTML entities already processed:
                        // - child.innerHTML returns "Hello&nbsp;World"
// - child.firstChild.nodeValue returns "Hello World"
currentText += child.firstChild.nodeValue;
if (lastNode) {
nodesToDelete.push(lastNode);
}
lastNode = child;
}
} else {
if (currentlyTraversingExpandedText) {
currentlyTraversingExpandedText = false;
// We just left a region with data-i elements set.
// so, we need to store the currentText back to the region.
// We do this by using the last visited node as anchor.
lastNode.parentNode.replaceChild(document.createTextNode(currentText), lastNode);
}
// Recursion
if (!~child.className.indexOf('aloha-block')) {
// If child does not have the class "aloha-block", we iterate into it
convertBack(child);
}
}
}
}
if (currentlyTraversingExpandedText) {
// Special case: the last child node *is* a wrapped text node and we are at the end of the collection.
// In this case, we convert the text as well.
lastNode.parentNode.replaceChild(document.createTextNode(currentText), lastNode);
}
};
convertBack(el);
for (var i=0, l=nodesToDelete.length; i<l; i++) {
nodesToDelete[i].parentNode.removeChild(nodesToDelete[i]);
}
},
/**************************
* SECTION: Drag&Drop for Block elements
**************************/
_setupDragDropForBlockElements: function() {
// Mark the drag handle with an extra CSS class, such that it is picked up by BlockManager.initializeBlockLevelDragDrop()
this.$element.find('.aloha-block-draghandle').addClass('aloha-block-draghandle-blocklevel');
},
/**************************
* SECTION: Other Rendering Helpers
**************************/
/**
* Create editables from the inner content that was
* rendered for this block.
*
* This method must be idempotent. I.e. it must produce the same results
* when called once or twice.
*
* Override to use a custom implementation and to pass
* special configuration to .aloha()
*/
createEditablesIfNeeded: function() {
        // TODO: only create them if they are not aloha elements yet...
// TODO: should only happen inside Aloha
this.$element.find('.aloha-editable').aloha();
},
/**
* Render block toolbar elements
*
* This method must be idempotent. I.e. it must produce the same results
* when called once or twice.
*
* Template method to render custom block UI.
* @api
*/
renderBlockHandlesIfNeeded: function() {
if (this.isDraggable()) {
if (this.$element.children('.aloha-block-draghandle').length === 0) {
this.$element.prepend('<span class="aloha-block-handle aloha-block-draghandle"></span>');
}
}
},
/**************************
* SECTION: Attribute Handling
**************************/
/**
     * Get or set one or many attributes, similar to the jQuery attr() function.
*
* The attribute keys are converted internally to lowercase,
* so attr('foo', 'bar') and attr('FoO', 'bar') are the same internally.
* The same applies to reading.
*
* It is not allowed to set internal attributes (starting with aloha-block-) through this API.
*
* @api
* @param {String|Object} attributeNameOrObject
* @param {String} attributeValue
     * @param {Boolean} suppressEvents Optional. If true, we do not fire change events.
*/
attr: function(attributeNameOrObject, attributeValue, suppressEvents) {
var that = this, attributeChanged = false;
if (arguments.length >= 2) {
if (attributeNameOrObject.substr(0, 12) === 'aloha-block-') {
Aloha.Log.error('block/block', 'It is not allowed to set internal block attributes (starting with aloha-block-) through Block.attr() (You tried to set ' + attributeNameOrObject + ')');
return;
}
if (this._getAttribute(attributeNameOrObject) !== attributeValue) {
attributeChanged = true;
}
this._setAttribute(attributeNameOrObject, attributeValue);
} else if (typeof attributeNameOrObject === 'object') {
jQuery.each(attributeNameOrObject, function(key, value) {
if (key.substr(0, 12) === 'aloha-block-') {
Aloha.Log.error('block/block', 'It is not allowed to set internal block attributes (starting with aloha-block-) through Block.attr() (You tried to set ' + key + ')');
return;
}
if (that._getAttribute(key) !== value) {
attributeChanged = true;
}
that._setAttribute(key, value);
});
} else if (typeof attributeNameOrObject === 'string') {
return this._getAttribute(attributeNameOrObject);
} else {
return this._getAttributes();
}
if (attributeChanged && !suppressEvents) {
this._update();
this.trigger('change');
}
return null;
},
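        // Illustrative usage sketch (not part of the original source); "block" stands
        // for an AbstractBlock instance, and "foo", "bar", "baz", "qux" are made-up
        // attribute names and values:
        //
        //     block.attr('foo', 'bar');             // set one attribute (stored as data-foo)
        //     block.attr({foo: 'bar', baz: 'qux'}); // set several attributes at once
        //     block.attr('foo');                    // read a single attribute
        //     block.attr();                         // read all data-* attributes as an object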
/**
* Internal helper for setting a single attribute.
*/
_setAttribute: function(name, value) {
this.$element.attr('data-' + name.toLowerCase(), value);
},
/**
* Internal helper for getting an attribute
*/
_getAttribute: function(name) {
return this.$element.attr('data-' + name.toLowerCase());
},
/**
* Internal helper for getting all attributes
*/
_getAttributes: function() {
var attributes = {};
            // element.data() is not always up-to-date; that's why we iterate over the attributes directly.
jQuery.each(this.$element[0].attributes, function(i, attribute) {
if (attribute.name.substr(0, 5) === 'data-') {
attributes[attribute.name.substr(5).toLowerCase()] = attribute.value;
}
});
return attributes;
}
});
/**
* @name block.block.DefaultBlock
* @class A default block that renders the initial content
* @extends block.block.AbstractBlock
*/
var DefaultBlock = AbstractBlock.extend(
/** @lends block.block.DefaultBlock */
{
update: function($element, postProcessFn) {
postProcessFn();
}
});
/**
* @name block.block.DebugBlock
     * @class A debug block that outputs its attributes in a table
* @extends block.block.AbstractBlock
*/
var DebugBlock = AbstractBlock.extend(
/** @lends block.block.DebugBlock */
{
title: 'Debugging',
init: function($element, postProcessFn) {
this.update($element, postProcessFn);
},
update: function($element, postProcessFn) {
$element.css({display: 'block'});
var renderedAttributes = '<table class="debug-block">';
jQuery.each(this.attr(), function(k, v) {
renderedAttributes += '<tr><th>' + k + '</th><td>' + v + '</td></tr>';
});
renderedAttributes += '</table>';
$element.html(renderedAttributes);
postProcessFn();
}
});
return {
AbstractBlock: AbstractBlock,
DefaultBlock: DefaultBlock,
DebugBlock: DebugBlock
};
});

# /BespON-0.6.0.tar.gz/BespON-0.6.0/bespon/coding.py
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import sys
# pylint: disable=E0602, W0622
if sys.version_info.major == 2:
str = unicode
chr = unichr
# pylint: enable=E0602, W0622
if sys.maxunicode == 0xFFFF:
__narrow_chr__ = unichr
__narrow_ord__ = ord
def chr_surrogate(cp):
'''
Version of `chr()` that uses Unicode surrogate pairs to represent
code points outside the Basic Multilingual Plane.
'''
if cp <= 0xFFFF:
return __narrow_chr__(cp)
# http://www.unicode.org/faq//utf_bom.html#utf16-4
return __narrow_chr__(0xD7C0 + (cp >> 10)) + __narrow_chr__(0xDC00 + (cp & 0x3FF))
def ord_surrogate(c):
'''
Version of `ord()` that can accept Unicode surrogate pairs and return
the integer value of the code point represented by them.
'''
if len(c) != 2:
return __narrow_ord__(c)
ord_c_0 = __narrow_ord__(c[0])
ord_c_1 = __narrow_ord__(c[1])
if 0xD800 <= ord_c_0 <= 0xDBFF and 0xDC00 <= ord_c_1 <= 0xDFFF:
# http://www.unicode.org/faq//utf_bom.html#utf16-4
return -0x35FDC00 + (ord_c_0 << 10) + ord_c_1
raise UnicodeError
else:
def chr_surrogate(cp):
'''
Version of `chr()` that uses Unicode surrogate pairs to represent
code points outside the Basic Multilingual Plane.
'''
if cp <= 0xFFFF:
return chr(cp)
# http://www.unicode.org/faq//utf_bom.html#utf16-4
return chr(0xD7C0 + (cp >> 10)) + chr(0xDC00 + (cp & 0x3FF))
def ord_surrogate(c):
'''
Version of `ord()` that can accept Unicode surrogate pairs and return
the integer value of the code point represented by them.
'''
if len(c) != 2:
return ord(c)
ord_c_0 = ord(c[0])
ord_c_1 = ord(c[1])
if 0xD800 <= ord_c_0 <= 0xDBFF and 0xDC00 <= ord_c_1 <= 0xDFFF:
# http://www.unicode.org/faq//utf_bom.html#utf16-4
return -0x35FDC00 + (ord_c_0 << 10) + ord_c_1
            raise UnicodeError
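# Illustrative usage sketch (not part of the original module): code points above
# U+FFFF round-trip through a UTF-16 surrogate pair on both narrow and wide builds.
#
#     pair = chr_surrogate(0x1F600)          # u'\ud83d' + u'\ude00'
#     assert ord_surrogate(pair) == 0x1F600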

// /CodeIntel-2.0.0b19-cp34-cp34m-macosx_10_12_x86_64.whl/codeintel/codeintel2/lib_srcs/node.js/0.6/vm.js
var vm = {};
/**
* createScript compiles code but does not run it. Instead, it returns a
* vm.Script object representing this compiled code. This script can be run
* later many times using methods below. The returned script is not bound
* to any global object. It is bound before each run, just for that run.
* filename is optional, it's only used in stack traces.
* @param code
* @param filename
* @returns {vm.Script}
*/
vm.createScript = function(code, filename) {}
/**
* vm.runInThisContext() compiles code, runs it and returns the result.
* Running code does not have access to local scope. filename is optional,
* it's used only in stack traces.
* @param code
* @param filename
*/
vm.runInThisContext = function(code, filename) {}
/**
* vm.runInNewContext compiles code, then runs it in sandbox and returns
* the result. Running code does not have access to local scope. The object
* sandbox will be used as the global object for code.
* @param code
* @param sandbox
* @param filename
*/
vm.runInNewContext = function(code, sandbox, filename) {}
/**
* A class for running scripts. Returned by vm.createScript.
* @constructor
*/
vm.Script = function() {}
/**
* Similar to vm.runInThisContext but a method of a precompiled Script
* object.
*/
vm.Script.prototype.runInThisContext = function() {}
/**
* Similar to vm.runInNewContext a method of a precompiled Script object.
* @param sandbox
*/
vm.Script.prototype.runInNewContext = function(sandbox) {}
/**
* vm.createContext creates a new context which is suitable for use as the
* 2nd argument of a subsequent call to vm.runInContext. A (V8) context
* comprises a global object together with a set of build-in objects and
* functions. The optional argument initSandbox will be shallow-copied to
* seed the initial contents of the global object used by the context.
* @param initSandbox
*/
vm.createContext = function(initSandbox) {}
/**
* vm.runInContext compiles code, then runs it in context and returns the
* result. A (V8) context comprises a global object, together with a set of
* built-in objects and functions. Running code does not have access to
* local scope and the global object held within context will be used as
* the global object for code.
* @param code
* @param context
* @param filename
*/
vm.runInContext = function(code, context, filename) {}
exports = vm;
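// Illustrative usage sketch (not part of the original stub); the code string,
// sandbox contents and 'counter.js' filename are made up:
//
//     var vm = require('vm');
//     var sandbox = { count: 0 };
//     vm.runInNewContext('count += 1', sandbox, 'counter.js'); // sandbox.count === 1
//
//     var script = vm.createScript('count += 1', 'counter.js');
//     script.runInNewContext(sandbox); // the compiled script can be run many times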
/Euphorie-15.0.2.tar.gz/Euphorie-15.0.2/src/euphorie/client/resources/oira/script/chunks/5899.3ad49e66af3ff7c33275.min.js | "use strict";(self.webpackChunk_patternslib_patternslib=self.webpackChunk_patternslib_patternslib||[]).push([[5899,85042,34754,18045],{63121:function(A,t,e){e.r(t),e.d(t,{ListView:function(){return D},default:function(){return m}});var n=e(93379),i=e.n(n),r=e(7795),a=e.n(r),o=e(3565),s=e.n(o),c=e(19216),l=e.n(c),d=e(44589),f=e.n(d),g=e(60402),u={};u.styleTagTransform=f(),u.setAttributes=s(),u.insert=function(A){var t=document.head.querySelectorAll("*")[0];t?document.head.insertBefore(A,t):document.head.append(A)},u.domAPI=a(),u.insertStyleElement=l();i()(g.Z,u),g.Z&&g.Z.locals&&g.Z.locals;var C=e(28027),h=e(70655),B=function(A){function t(){var t=null!==A&&A.apply(this,arguments)||this;return t.state={textId:(0,C.osf)()},t}return(0,h.ZT)(t,A),t.prototype.render=function(){var A=this.context,t=A.theme,e=A.dateEnv,n=A.options,i=A.viewApi,r=this.props,a=r.cellId,o=r.dayDate,s=r.todayRange,c=this.state.textId,l=(0,C.iCZ)(o,s),d=n.listDayFormat?e.format(o,n.listDayFormat):"",f=n.listDaySideFormat?e.format(o,n.listDaySideFormat):"",g=(0,h.pi)({date:e.toDate(o),view:i,textId:c,text:d,sideText:f,navLinkAttrs:(0,C.rcD)(this.context,o),sideNavLinkAttrs:(0,C.rcD)(this.context,o,"day",!1)},l),u=["fc-list-day"].concat((0,C.yLW)(l,t));return(0,C.azq)(C.QJ3,{hookProps:g,classNames:n.dayHeaderClassNames,content:n.dayHeaderContent,defaultContent:v,didMount:n.dayHeaderDidMount,willUnmount:n.dayHeaderWillUnmount},(function(A,e,n,i){return(0,C.azq)("tr",{ref:A,className:u.concat(e).join(" "),"data-date":(0,C.SVl)(o)},(0,C.azq)("th",{scope:"colgroup",colSpan:3,id:a,"aria-labelledby":c},(0,C.azq)("div",{className:"fc-list-day-cushion "+t.getClass("tableCellShaded"),ref:n},i)))}))},t}(C.H6J);function v(A){return(0,C.azq)(C.HYg,null,A.text&&(0,C.azq)("a",(0,h.pi)({id:A.textId,className:"fc-list-day-text"},A.navLinkAttrs),A.text),A.sideText&&(0,C.azq)("a",(0,h.pi)({"aria-hidden":!0,className:"fc-list-day-side-text"},A.sideNavLinkAttrs),A.sideText))}var p=(0,C.SPZ)({hour:"numeric",minute:"2-digit",meridiem:"short"}),Q=function(A){function t(){return null!==A&&A.apply(this,arguments)||this}return(0,h.ZT)(t,A),t.prototype.render=function(){var A=this.props,t=this.context,e=A.seg,n=A.timeHeaderId,i=A.eventHeaderId,r=A.dateHeaderId,a=t.options.eventTimeFormat||p;return(0,C.azq)(C.Vsx,{seg:e,timeText:"",disableDragging:!0,disableResizing:!0,defaultContent:function(){return function(A,t){var e=(0,C.PsW)(A,t);return(0,C.azq)("a",(0,h.pi)({},e),A.eventRange.def.title)}(e,t)},isPast:A.isPast,isFuture:A.isFuture,isToday:A.isToday,isSelected:A.isSelected,isDragging:A.isDragging,isResizing:A.isResizing,isDateSelecting:A.isDateSelecting},(function(A,o,s,c,l){return(0,C.azq)("tr",{className:["fc-list-event",l.event.url?"fc-event-forced-url":""].concat(o).join(" "),ref:A},function(A,t,e,n,i){var r=e.options;if(!1!==r.displayEventTime){var a=A.eventRange.def,o=A.eventRange.instance,s=!1,c=void 0;if(a.allDay?s=!0:(0,C.p7j)(A.eventRange.range)?A.isStart?c=(0,C.r39)(A,t,e,null,null,o.range.start,A.end):A.isEnd?c=(0,C.r39)(A,t,e,null,null,A.start,o.range.end):s=!0:c=(0,C.r39)(A,t,e),s){var l={text:e.options.allDayText,view:e.viewApi};return(0,C.azq)(C.QJ3,{hookProps:l,classNames:r.allDayClassNames,content:r.allDayContent,defaultContent:y,didMount:r.allDayDidMount,willUnmount:r.allDayWillUnmount},(function(A,t,e,r){return(0,C.azq)("td",{ref:A,headers:n+" "+i,className:["fc-list-event-time"].concat(t).join(" 
")},r)}))}return(0,C.azq)("td",{className:"fc-list-event-time"},c)}return null}(e,a,t,n,r),(0,C.azq)("td",{"aria-hidden":!0,className:"fc-list-event-graphic"},(0,C.azq)("span",{className:"fc-list-event-dot",style:{borderColor:l.borderColor||l.backgroundColor}})),(0,C.azq)("td",{ref:s,headers:i+" "+r,className:"fc-list-event-title"},c))}))},t}(C.H6J);function y(A){return A.text}var D=function(A){function t(){var t=null!==A&&A.apply(this,arguments)||this;return t.computeDateVars=(0,C.HPs)(b),t.eventStoreToSegs=(0,C.HPs)(t._eventStoreToSegs),t.state={timeHeaderId:(0,C.osf)(),eventHeaderId:(0,C.osf)(),dateHeaderIdRoot:(0,C.osf)()},t.setRootEl=function(A){A?t.context.registerInteractiveComponent(t,{el:A}):t.context.unregisterInteractiveComponent(t)},t}return(0,h.ZT)(t,A),t.prototype.render=function(){var A=this,t=this.props,e=this.context,n=["fc-list",e.theme.getClass("table"),!1!==e.options.stickyHeaderDates?"fc-list-sticky":""],i=this.computeDateVars(t.dateProfile),r=i.dayDates,a=i.dayRanges,o=this.eventStoreToSegs(t.eventStore,t.eventUiBases,a);return(0,C.azq)(C.xS$,{viewSpec:e.viewSpec,elRef:this.setRootEl},(function(e,i){return(0,C.azq)("div",{ref:e,className:n.concat(i).join(" ")},(0,C.azq)(C.Ttm,{liquid:!t.isHeightAuto,overflowX:t.isHeightAuto?"visible":"hidden",overflowY:t.isHeightAuto?"visible":"auto"},o.length>0?A.renderSegList(o,r):A.renderEmptyMessage()))}))},t.prototype.renderEmptyMessage=function(){var A=this.context,t=A.options,e=A.viewApi,n={text:t.noEventsText,view:e};return(0,C.azq)(C.QJ3,{hookProps:n,classNames:t.noEventsClassNames,content:t.noEventsContent,defaultContent:w,didMount:t.noEventsDidMount,willUnmount:t.noEventsWillUnmount},(function(A,t,e,n){return(0,C.azq)("div",{className:["fc-list-empty"].concat(t).join(" "),ref:A},(0,C.azq)("div",{className:"fc-list-empty-cushion",ref:e},n))}))},t.prototype.renderSegList=function(A,t){var e=this.context,n=e.theme,i=e.options,r=this.state,a=r.timeHeaderId,o=r.eventHeaderId,s=r.dateHeaderIdRoot,c=function(A){var t,e,n=[];for(t=0;t<A.length;t+=1)(n[(e=A[t]).dayIndex]||(n[e.dayIndex]=[])).push(e);return n}(A);return(0,C.azq)(C.wh8,{unit:"day"},(function(A,e){for(var r=[],l=0;l<c.length;l+=1){var d=c[l];if(d){var f=(0,C.SVl)(t[l]),g=s+"-"+f;r.push((0,C.azq)(B,{key:f,cellId:g,dayDate:t[l],todayRange:e}));for(var u=0,v=d=(0,C.hak)(d,i.eventOrder);u<v.length;u++){var p=v[u];r.push((0,C.azq)(Q,(0,h.pi)({key:f+":"+p.eventRange.instance.instanceId,seg:p,isDragging:!1,isResizing:!1,isDateSelecting:!1,isSelected:!1,timeHeaderId:a,eventHeaderId:o,dateHeaderId:g},(0,C.jHR)(p,e,A))))}}}return(0,C.azq)("table",{className:"fc-list-table "+n.getClass("table")},(0,C.azq)("thead",null,(0,C.azq)("tr",null,(0,C.azq)("th",{scope:"col",id:a},i.timeHint),(0,C.azq)("th",{scope:"col","aria-hidden":!0}),(0,C.azq)("th",{scope:"col",id:o},i.eventHint))),(0,C.azq)("tbody",null,r))}))},t.prototype._eventStoreToSegs=function(A,t,e){return this.eventRangesToSegs((0,C.y$4)(A,t,this.props.dateProfile.activeRange,this.context.options.nextDayThreshold).fg,e)},t.prototype.eventRangesToSegs=function(A,t){for(var e=[],n=0,i=A;n<i.length;n++){var r=i[n];e.push.apply(e,this.eventRangeToSegs(r,t))}return e},t.prototype.eventRangeToSegs=function(A,t){var 
e,n,i,r=this.context.dateEnv,a=this.context.options.nextDayThreshold,o=A.range,s=A.def.allDay,c=[];for(e=0;e<t.length;e+=1)if((n=(0,C.cMs)(o,t[e]))&&(i={component:this,eventRange:A,start:n.start,end:n.end,isStart:A.isStart&&n.start.valueOf()===o.start.valueOf(),isEnd:A.isEnd&&n.end.valueOf()===o.end.valueOf(),dayIndex:e},c.push(i),!i.isEnd&&!s&&e+1<t.length&&o.end<r.add(t[e+1].start,a))){i.end=o.end,i.isEnd=!0;break}return c},t}(C.IdW);function w(A){return A.text}function b(A){for(var t=(0,C.b7Q)(A.renderRange.start),e=A.renderRange.end,n=[],i=[];t<e;)n.push(t),i.push({start:t,end:(0,C.E4D)(t,1)}),t=(0,C.E4D)(t,1);return{dayDates:n,dayRanges:i}}var E={listDayFormat:I,listDaySideFormat:I,noEventsClassNames:C.yRu,noEventsContent:C.yRu,noEventsDidMount:C.yRu,noEventsWillUnmount:C.yRu};function I(A){return!1===A?null:(0,C.SPZ)(A)}var m=(0,C.rxu)({optionRefiners:E,views:{list:{component:D,buttonTextKey:"list",listDayFormat:{month:"long",day:"numeric",year:"numeric"}},listDay:{type:"list",duration:{days:1},listDayFormat:{weekday:"long"}},listWeek:{type:"list",duration:{weeks:1},listDayFormat:{weekday:"long"},listDaySideFormat:{month:"long",day:"numeric",year:"numeric"}},listMonth:{type:"list",duration:{month:1},listDaySideFormat:{weekday:"long"}},listYear:{type:"list",duration:{year:1},listDaySideFormat:{weekday:"long"}}}})},60402:function(A,t,e){var n=e(87537),i=e.n(n),r=e(23645),a=e.n(r)()(i());a.push([A.id,':root{--fc-list-event-dot-width: 10px;--fc-list-event-hover-bg-color: #f5f5f5}.fc-theme-standard .fc-list{border:1px solid #ddd;border:1px solid var(--fc-border-color, #ddd)}.fc .fc-list-empty{background-color:rgba(208,208,208,.3);background-color:var(--fc-neutral-bg-color, rgba(208, 208, 208, 0.3));height:100%;display:flex;justify-content:center;align-items:center}.fc .fc-list-empty-cushion{margin:5em 0}.fc .fc-list-table{width:100%;border-style:hidden}.fc .fc-list-table tr>*{border-left:0;border-right:0}.fc .fc-list-sticky .fc-list-day>*{position:sticky;top:0;background:#fff;background:var(--fc-page-bg-color, #fff)}.fc .fc-list-table thead{position:absolute;left:-10000px}.fc .fc-list-table tbody>tr:first-child th{border-top:0}.fc .fc-list-table th{padding:0}.fc .fc-list-table td,.fc .fc-list-day-cushion{padding:8px 14px}.fc .fc-list-day-cushion:after{content:"";clear:both;display:table}.fc-theme-standard .fc-list-day-cushion{background-color:rgba(208,208,208,.3);background-color:var(--fc-neutral-bg-color, rgba(208, 208, 208, 0.3))}.fc-direction-ltr .fc-list-day-text,.fc-direction-rtl .fc-list-day-side-text{float:left}.fc-direction-ltr .fc-list-day-side-text,.fc-direction-rtl .fc-list-day-text{float:right}.fc-direction-ltr .fc-list-table .fc-list-event-graphic{padding-right:0}.fc-direction-rtl .fc-list-table .fc-list-event-graphic{padding-left:0}.fc .fc-list-event.fc-event-forced-url{cursor:pointer}.fc .fc-list-event:hover td{background-color:#f5f5f5;background-color:var(--fc-list-event-hover-bg-color, #f5f5f5)}.fc .fc-list-event-graphic,.fc .fc-list-event-time{white-space:nowrap;width:1px}.fc .fc-list-event-dot{display:inline-block;box-sizing:content-box;width:0;height:0;border:5px solid #3788d8;border:calc(var(--fc-list-event-dot-width, 10px)/2) solid var(--fc-event-border-color, #3788d8);border-radius:5px;border-radius:calc(var(--fc-list-event-dot-width, 10px)/2)}.fc .fc-list-event-title a{color:inherit;text-decoration:none}.fc .fc-list-event.fc-event-forced-url:hover 
a{text-decoration:underline}',"",{version:3,sources:["webpack://./node_modules/@fullcalendar/list/main.css"],names:[],mappings:"AACA,MACE,+BAAA,CACA,uCAAA,CAEF,4BACI,qBAAA,CACA,6CAAA,CAOJ,mBACI,qCAAA,CACA,qEAAA,CACA,WAAA,CACA,YAAA,CACA,sBAAA,CACA,kBAAA,CAEJ,2BACI,YAAA,CAQJ,mBACI,UAAA,CACA,mBAAA,CAEJ,wBACI,aAAA,CACA,cAAA,CAEJ,mCACM,eAAA,CACA,KAAA,CACA,eAAA,CACA,wCAAA,CAON,yBACI,iBAAA,CACA,aAAA,CAOJ,2CACI,YAAA,CAEJ,sBACI,SAAA,CAEJ,+CAEI,gBAAA,CASJ,+BACE,UAAA,CACA,UAAA,CACA,aAAA,CAEF,wCACI,qCAAA,CACA,qEAAA,CAEJ,6EAEE,UAAA,CAEF,6EAEE,WAAA,CAGF,wDAAA,eAAA,CACA,wDAAA,cAAA,CACA,uCACI,cAAA,CAEJ,4BACI,wBAAA,CACA,6DAAA,CAOJ,mDAEI,kBAAA,CACA,SAAA,CAEJ,uBACI,oBAAA,CACA,sBAAA,CACA,OAAA,CACA,QAAA,CACA,wBAAA,CACA,+FAAA,CACA,iBAAA,CACA,0DAAA,CAOJ,2BACI,aAAA,CACA,oBAAA,CAOJ,+CACI,yBAAA",sourcesContent:['\n:root {\n --fc-list-event-dot-width: 10px;\n --fc-list-event-hover-bg-color: #f5f5f5;\n}\n.fc-theme-standard .fc-list {\n border: 1px solid #ddd;\n border: 1px solid var(--fc-border-color, #ddd);\n }\n.fc {\n\n /* message when no events */\n\n}\n.fc .fc-list-empty {\n background-color: rgba(208, 208, 208, 0.3);\n background-color: var(--fc-neutral-bg-color, rgba(208, 208, 208, 0.3));\n height: 100%;\n display: flex;\n justify-content: center;\n align-items: center; /* vertically aligns fc-list-empty-inner */\n }\n.fc .fc-list-empty-cushion {\n margin: 5em 0;\n }\n.fc {\n\n /* table within the scroller */\n /* ---------------------------------------------------------------------------------------------------- */\n\n}\n.fc .fc-list-table {\n width: 100%;\n border-style: hidden; /* kill outer border on theme */\n }\n.fc .fc-list-table tr > * {\n border-left: 0;\n border-right: 0;\n }\n.fc .fc-list-sticky .fc-list-day > * { /* the cells */\n position: sticky;\n top: 0;\n background: #fff;\n background: var(--fc-page-bg-color, #fff); /* for when headers are styled to be transparent and sticky */\n }\n.fc {\n\n /* only exists for aria reasons, hide for non-screen-readers */\n\n}\n.fc .fc-list-table thead {\n position: absolute;\n left: -10000px;\n }\n.fc {\n\n /* the table\'s border-style:hidden gets confused by hidden thead. force-hide top border of first cell */\n\n}\n.fc .fc-list-table tbody > tr:first-child th {\n border-top: 0;\n }\n.fc .fc-list-table th {\n padding: 0; /* uses an inner-wrapper instead... 
*/\n }\n.fc .fc-list-table td,\n .fc .fc-list-day-cushion {\n padding: 8px 14px;\n }\n.fc {\n\n\n /* date heading rows */\n /* ---------------------------------------------------------------------------------------------------- */\n\n}\n.fc .fc-list-day-cushion:after {\n content: "";\n clear: both;\n display: table; /* clear floating */\n }\n.fc-theme-standard .fc-list-day-cushion {\n background-color: rgba(208, 208, 208, 0.3);\n background-color: var(--fc-neutral-bg-color, rgba(208, 208, 208, 0.3));\n }\n.fc-direction-ltr .fc-list-day-text,\n.fc-direction-rtl .fc-list-day-side-text {\n float: left;\n}\n.fc-direction-ltr .fc-list-day-side-text,\n.fc-direction-rtl .fc-list-day-text {\n float: right;\n}\n/* make the dot closer to the event title */\n.fc-direction-ltr .fc-list-table .fc-list-event-graphic { padding-right: 0 }\n.fc-direction-rtl .fc-list-table .fc-list-event-graphic { padding-left: 0 }\n.fc .fc-list-event.fc-event-forced-url {\n cursor: pointer; /* whole row will seem clickable */\n }\n.fc .fc-list-event:hover td {\n background-color: #f5f5f5;\n background-color: var(--fc-list-event-hover-bg-color, #f5f5f5);\n }\n.fc {\n\n /* shrink certain cols */\n\n}\n.fc .fc-list-event-graphic,\n .fc .fc-list-event-time {\n white-space: nowrap;\n width: 1px;\n }\n.fc .fc-list-event-dot {\n display: inline-block;\n box-sizing: content-box;\n width: 0;\n height: 0;\n border: 5px solid #3788d8;\n border: calc(var(--fc-list-event-dot-width, 10px) / 2) solid var(--fc-event-border-color, #3788d8);\n border-radius: 5px;\n border-radius: calc(var(--fc-list-event-dot-width, 10px) / 2);\n }\n.fc {\n\n /* reset <a> styling */\n\n}\n.fc .fc-list-event-title a {\n color: inherit;\n text-decoration: none;\n }\n.fc {\n\n /* underline link when hovering over any part of row */\n\n}\n.fc .fc-list-event.fc-event-forced-url:hover a {\n text-decoration: underline;\n 
}\n'],sourceRoot:""}]),t.Z=a},85042:function(A){A.exports="data:application/x-font-ttf;charset=utf-8;base64,AAEAAAALAIAAAwAwT1MvMg8SBfAAAAC8AAAAYGNtYXAXVtKNAAABHAAAAFRnYXNwAAAAEAAAAXAAAAAIZ2x5ZgYydxIAAAF4AAAFNGhlYWQUJ7cIAAAGrAAAADZoaGVhB20DzAAABuQAAAAkaG10eCIABhQAAAcIAAAALGxvY2ED4AU6AAAHNAAAABhtYXhwAA8AjAAAB0wAAAAgbmFtZXsr690AAAdsAAABhnBvc3QAAwAAAAAI9AAAACAAAwPAAZAABQAAApkCzAAAAI8CmQLMAAAB6wAzAQkAAAAAAAAAAAAAAAAAAAABEAAAAAAAAAAAAAAAAAAAAABAAADpBgPA/8AAQAPAAEAAAAABAAAAAAAAAAAAAAAgAAAAAAADAAAAAwAAABwAAQADAAAAHAADAAEAAAAcAAQAOAAAAAoACAACAAIAAQAg6Qb//f//AAAAAAAg6QD//f//AAH/4xcEAAMAAQAAAAAAAAAAAAAAAQAB//8ADwABAAAAAAAAAAAAAgAANzkBAAAAAAEAAAAAAAAAAAACAAA3OQEAAAAAAQAAAAAAAAAAAAIAADc5AQAAAAABAWIAjQKeAskAEwAAJSc3NjQnJiIHAQYUFwEWMjc2NCcCnuLiDQ0MJAz/AA0NAQAMJAwNDcni4gwjDQwM/wANIwz/AA0NDCMNAAAAAQFiAI0CngLJABMAACUBNjQnASYiBwYUHwEHBhQXFjI3AZ4BAA0N/wAMJAwNDeLiDQ0MJAyNAQAMIw0BAAwMDSMM4uINIwwNDQAAAAIA4gC3Ax4CngATACcAACUnNzY0JyYiDwEGFB8BFjI3NjQnISc3NjQnJiIPAQYUHwEWMjc2NCcB87e3DQ0MIw3VDQ3VDSMMDQ0BK7e3DQ0MJAzVDQ3VDCQMDQ3zuLcMJAwNDdUNIwzWDAwNIwy4twwkDA0N1Q0jDNYMDA0jDAAAAgDiALcDHgKeABMAJwAAJTc2NC8BJiIHBhQfAQcGFBcWMjchNzY0LwEmIgcGFB8BBwYUFxYyNwJJ1Q0N1Q0jDA0Nt7cNDQwjDf7V1Q0N1QwkDA0Nt7cNDQwkDLfWDCMN1Q0NDCQMt7gMIw0MDNYMIw3VDQ0MJAy3uAwjDQwMAAADAFUAAAOrA1UAMwBoAHcAABMiBgcOAQcOAQcOARURFBYXHgEXHgEXHgEzITI2Nz4BNz4BNz4BNRE0JicuAScuAScuASMFITIWFx4BFx4BFx4BFREUBgcOAQcOAQcOASMhIiYnLgEnLgEnLgE1ETQ2Nz4BNz4BNz4BMxMhMjY1NCYjISIGFRQWM9UNGAwLFQkJDgUFBQUFBQ4JCRULDBgNAlYNGAwLFQkJDgUFBQUFBQ4JCRULDBgN/aoCVgQIBAQHAwMFAQIBAQIBBQMDBwQECAT9qgQIBAQHAwMFAQIBAQIBBQMDBwQECASAAVYRGRkR/qoRGRkRA1UFBAUOCQkVDAsZDf2rDRkLDBUJCA4FBQUFBQUOCQgVDAsZDQJVDRkLDBUJCQ4FBAVVAgECBQMCBwQECAX9qwQJAwQHAwMFAQICAgIBBQMDBwQDCQQCVQUIBAQHAgMFAgEC/oAZEhEZGRESGQAAAAADAFUAAAOrA1UAMwBoAIkAABMiBgcOAQcOAQcOARURFBYXHgEXHgEXHgEzITI2Nz4BNz4BNz4BNRE0JicuAScuAScuASMFITIWFx4BFx4BFx4BFREUBgcOAQcOAQcOASMhIiYnLgEnLgEnLgE1ETQ2Nz4BNz4BNz4BMxMzFRQWMzI2PQEzMjY1NCYrATU0JiMiBh0BIyIGFRQWM9UNGAwLFQkJDgUFBQUFBQ4JCRULDBgNAlYNGAwLFQkJDgUFBQUFBQ4JCRULDBgN/aoCVgQIBAQHAwMFAQIBAQIBBQMDBwQECAT9qgQIBAQHAwMFAQIBAQIBBQMDBwQECASAgBkSEhmAERkZEYAZEhIZgBEZGREDVQUEBQ4JCRUMCxkN/asNGQsMFQkIDgUFBQUFBQ4JCBUMCxkNAlUNGQsMFQkJDgUEBVUCAQIFAwIHBAQIBf2rBAkDBAcDAwUBAgICAgEFAwMHBAMJBAJVBQgEBAcCAwUCAQL+gIASGRkSgBkSERmAEhkZEoAZERIZAAABAOIAjQMeAskAIAAAExcHBhQXFjI/ARcWMjc2NC8BNzY0JyYiDwEnJiIHBhQX4uLiDQ0MJAzi4gwkDA0N4uINDQwkDOLiDCQMDQ0CjeLiDSMMDQ3h4Q0NDCMN4uIMIw0MDOLiDAwNIwwAAAABAAAAAQAAa5n0y18PPPUACwQAAAAAANivOVsAAAAA2K85WwAAAAADqwNVAAAACAACAAAAAAAAAAEAAAPA/8AAAAQAAAAAAAOrAAEAAAAAAAAAAAAAAAAAAAALBAAAAAAAAAAAAAAAAgAAAAQAAWIEAAFiBAAA4gQAAOIEAABVBAAAVQQAAOIAAAAAAAoAFAAeAEQAagCqAOoBngJkApoAAQAAAAsAigADAAAAAAACAAAAAAAAAAAAAAAAAAAAAAAAAA4ArgABAAAAAAABAAcAAAABAAAAAAACAAcAYAABAAAAAAADAAcANgABAAAAAAAEAAcAdQABAAAAAAAFAAsAFQABAAAAAAAGAAcASwABAAAAAAAKABoAigADAAEECQABAA4ABwADAAEECQACAA4AZwADAAEECQADAA4APQADAAEECQAEAA4AfAADAAEECQAFABYAIAADAAEECQAGAA4AUgADAAEECQAKADQApGZjaWNvbnMAZgBjAGkAYwBvAG4Ac1ZlcnNpb24gMS4wAFYAZQByAHMAaQBvAG4AIAAxAC4AMGZjaWNvbnMAZgBjAGkAYwBvAG4Ac2ZjaWNvbnMAZgBjAGkAYwBvAG4Ac1JlZ3VsYXIAUgBlAGcAdQBsAGEAcmZjaWNvbnMAZgBjAGkAYwBvAG4Ac0ZvbnQgZ2VuZXJhdGVkIGJ5IEljb01vb24uAEYAbwBuAHQAIABnAGUAbgBlAHIAYQB0AGUAZAAgAGIAeQAgAEkAYwBvAE0AbwBvAG4ALgAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="}}]);
//# sourceMappingURL=5899.3ad49e66af3ff7c33275.min.js.map | PypiClean |

# /DoThings-0.2.tar.gz/DoThings-0.2/things/things.py
import pickle
import sys
from os import path
from docopt import docopt
class TermColor:
RED = "\033[91m"
GREEN = "\033[92m"
BOLD = "\033[1m"
END = "\033[0m"
class TermSign:
CHECK = u"\u2714".encode("utf8")
START = u"\u2731".encode("utf8")
BALLOTBOXWITHCHECK = u"\u2611".encode("utf8")
BALLOTBOX = u"\u2610".encode("utf8")
class Thing(object):
def __init__(self, name, undo=True):
self.name = name
self.undo = undo
class ToDo(object):
def __init__(self, saved_file=".todo"):
self.saved_file = saved_file
self.todos = []
if path.exists(saved_file):
with open(saved_file) as fp:
self.todos = pickle.load(fp)
def add(self, thing):
self.todos.append(thing)
self._save()
def update(self, index, updated_thing):
self.todos[index-1] = updated_thing
self._save()
def done(self, index):
self.todos[index-1].undo = False
self._save()
def undo(self, index):
self.todos[index-1].undo = True
self._save()
def remove(self, index):
del self.todos[index-1]
self._save()
def clear(self):
del self.todos[:]
self._save()
def _save(self):
with open(self.saved_file, "w+") as fp:
pickle.dump(self.todos, fp)
def print_todo(self):
print
# print "{0} TO-DO-List {0}".format("*"*32)
for index, thing in enumerate(self.todos):
if thing.undo:
print TermColor.RED + TermSign.START + TermColor.END,
print " {0}. {1}".format(index+1, thing.name)
print
def print_all(self):
print
# print "{0} Archieve-List {0}".format("*"*32)
for index, thing in enumerate(self.todos):
if thing.undo:
print TermColor.RED + TermSign.START + TermColor.END,
else:
print TermColor.GREEN + TermSign.CHECK + TermColor.END,
print " {0}. {1}".format(index+1, thing.name)
print
def main():
parser = docopt(__doc__)
td = ToDo()
try:
if parser["rm"]:
td.remove(int(parser["<num>"]))
elif parser["clear"]:
td.clear()
elif parser["done"]:
td.done(int(parser["<num>"]))
elif parser["undo"]:
td.undo(int(parser["<num>"]))
elif parser["<thing>"]:
thing = Thing(parser["<thing>"])
td.add(thing)
if parser["all"]:
td.print_all()
else:
td.print_todo()
except IndexError:
sys.stderr.write("Index is out of range, please retry...\n")
if __name__ == "__main__":
# print TermColor.RED + TermColor.BOLD + TermSign.START + TermColor.END
# print TermColor.GREEN + TermColor.BOLD +TermSign.CHECK + TermColor.END
    main()
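# Note (illustrative, not part of the original file): docopt(__doc__) in main()
# expects a usage string in the module docstring, which is not included in this
# dump. Judging only from the parser keys read above, the command line is roughly:
#
#     things <thing>       # add a new thing
#     things all           # list everything, including finished items
#     things done <num>    # mark item <num> as done
#     things undo <num>    # mark item <num> as not done
#     things rm <num>      # remove item <num>
#     things clear         # remove all items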

# /FlaskFarm-4.0.104-py3-none-any.whl/flaskfarm/lib/system/page_command.py
import queue
from support import SupportSubprocess
from tool import ToolModalCommand
from .setup import *
class PageCommand(PluginPageBase):
def __init__(self, P, parent):
super(PageCommand, self).__init__(P, parent, name='command')
self.db_default = {
f'{self.parent.name}_{self.name}_recent': '',
}
def process_menu(self, req):
arg = self.P.ModelSetting.to_dict()
arg['path_data'] = F.config['path_data']
return render_template(f'{self.P.package_name}_{self.parent.name}_{self.name}.html', arg=arg)
def process_command(self, command, arg1, arg2, arg3, req):
ret = {'ret':'success'}
if command == 'foreground_command':
P.ModelSetting.set(f'{self.parent.name}_{self.name}_recent', arg1)
self.__foreground_execute(arg1, arg1.split(' '))
return jsonify('')
elif command == 'job_new':
db_item = ModelCommand.job_new(arg1)
ret['msg'] = f"ID:{db_item.id} 작업을 생성하였습니다."
elif command == 'job_list':
ret['data'] = ModelCommand.job_list()
elif command == 'job_save':
data = P.logic.arg_to_dict(arg1)
db_item = ModelCommand.get_by_id(data['job_id'])
db_item.set_command(data['job_command'])
db_item.args = data['job_command_args']
db_item.description = data['job_description']
db_item.schedule_mode = data['job_schedule_mode']
db_item.schedule_auto_start = (data.get('job_schedule_auto_start', 'False') == 'True')
db_item.schedule_interval = data.get('job_schedule_interval', '')
db_item.save()
ret['msg'] = '수정하였습니다.'
elif command == 'job_remove':
if ModelCommand.delete_by_id(arg1):
ret['msg'] = '삭제하였습니다.'
else:
ret['ret'] = 'danger'
ret['msg'] = '삭제에 실패하였습니다.'
elif command == 'job_fore_execute':
db_item = ModelCommand.get_by_id(arg1)
cmd = (db_item.command + ' ' + db_item.args).strip()
self.__foreground_execute(f"Command ID: {db_item.id}", cmd.split(' '), db_item.id)
elif command == 'job_back_execute':
self.execute_thread_start(arg1)
ret['msg'] = "실행 요청을 하였습니다.<br>로그를 확인하세요."
elif command == 'job_log':
ret['filename'] = f"command_{arg1}.log"
if os.path.exists(os.path.join(F.config['path_data'], 'log', f"command_{arg1}.log")) == False:
ret['ret'] = 'danger'
ret['msg'] = "로그 파일이 없습니다."
elif command == 'task_sched':
job_id = arg1
flag = (arg2 == 'true')
scheduler_id = f'command_{job_id}'
if flag and F.scheduler.is_include(scheduler_id):
ret['msg'] = '이미 스케쥴러에 등록되어 있습니다.'
elif flag and F.scheduler.is_include(scheduler_id) == False:
result = self.__sched_add(job_id)
ret['msg'] = '스케쥴러에 추가하였습니다.'
elif flag == False and scheduler.is_include(scheduler_id):
result = scheduler.remove_job(scheduler_id)
ret['msg'] = '스케쥴링 취소'
elif flag == False and scheduler.is_include(scheduler_id) == False:
ret['msg'] = '등록되어 있지 않습니다.'
elif command == 'job_process_stop':
process_ins = SupportSubprocess.get_instance_by_call_id(f"command_{arg1}")
if process_ins == None:
ret['msg'] = "실행중인 Process가 없습니다."
else:
process_ins.process_close()
ret['msg'] = "Process를 중지하였습니다."
return jsonify(ret)
def __foreground_execute(self, title, command, job_id=None):
if command[0] != 'LOAD':
ToolModalCommand.start(title, [command])
else:
F.socketio.emit("command_modal_show", title, namespace='/framework', broadcast=True)
def start_communicate_load(load_log_list):
def func():
while True:
logs = load_log_list.getvalue()
load_log_list.truncate(0)
if logs:
P.logger.error(logs)
F.socketio.emit("command_modal_add_text", logs.strip() + '\n', namespace='/framework', broadcast=True)
if logs == '<<END>>':
break
time.sleep(0.3)
th = threading.Thread(target=func)
th.setDaemon(True)
th.start()
def func():
import io
from contextlib import redirect_stdout
load_log_list = io.StringIO()
with redirect_stdout(load_log_list):
start_communicate_load(load_log_list)
if job_id is not None:
command_logger = get_logger(f'command_{job_id}')
else:
command_logger = P.logger
self.__module_load(command, logger=command_logger)
load_log_list.write("<<END>>")
load_log_list.flush()
th = threading.Thread(target=func, args=())
th.setDaemon(True)
th.start()
return 'success'
def __module_load(self, command, **kwargs):
try:
python_filename = command[1]
python_sys_path = os.path.dirname(python_filename)
if python_sys_path not in sys.path:
sys.path.append(python_sys_path)
module_name = os.path.basename(python_filename).split('.py')[0]
if module_name not in sys.path:
sys.path.append(module_name)
import importlib
mod = importlib.import_module(module_name)
importlib.reload(mod)
args = command
mod_command_load = getattr(mod, 'main')
if mod_command_load:
ret = mod_command_load(*args, **kwargs)
return ret
except Exception as e:
P.logger.error(f'Exception:{str(e)}')
P.logger.error(traceback.format_exc())
def execute_thread_start(self, job_id):
th = threading.Thread(target=self.execute_thread_function_by_job_id, args=(job_id,))
th.setDaemon(True)
th.start()
return th
def execute_thread_function_by_job_id(self, *args, **kwargs):
#P.logger.error(d(args))
#P.logger.error(d(kwargs))
db_item = ModelCommand.get_by_id(args[0])
kwargs['id'] = args[0]
self.execute_thread_function((db_item.command + ' ' + db_item.args).strip(), **kwargs)
def execute_thread_function(self, command, **kwargs):
try:
cmd = command.split(' ')
if cmd[0] == 'LOAD':
command_logger = F.get_logger(f"command_{kwargs['id']}")
kwargs['logger'] = command_logger
return self.__module_load(cmd, **kwargs)
else:
class LogReceiver:
def __init__(self, logger):
self.logger = logger
def stdout_callback(self, call_id, mode, text):
if mode == 'LOG':
self.logger.debug(text)
else:
self.logger.debug(mode)
command_logger = F.get_logger(f"command_{kwargs['id']}", from_command=True)
receiver = LogReceiver(command_logger)
process = SupportSubprocess(cmd, stdout_callback=receiver.stdout_callback, call_id=f"command_{kwargs['id']}")
process.start()
except Exception as e:
P.logger.error(f'Exception:{str(e)}')
P.logger.error(traceback.format_exc())
def plugin_load(self):
def plugin_load_thread():
try:
db_items = ModelCommand.get_list()
for db_item in db_items:
if db_item.schedule_mode == 'startup':
self.execute_thread_start(db_item.id)
elif db_item.schedule_mode == 'scheduler' and db_item.schedule_auto_start:
self.__sched_add(db_item.id, db_item=db_item)
except Exception as e:
logger.error(f"Exception:{str(e)}")
logger.error(traceback.format_exc())
try:
th = threading.Thread(target=plugin_load_thread)
th.setDaemon(True)
th.start()
except Exception as e:
P.logger.error(f'Exception:{str(e)}')
P.logger.error(traceback.format_exc())
def __sched_add(self, id, db_item=None):
try:
if db_item is None:
db_item = ModelCommand.get_by_id(id)
job_id = f"command_{db_item.id}"
if scheduler.is_include(job_id):
return
job = Job(self.P.package_name, job_id, db_item.schedule_interval, self.execute_thread_function_by_job_id, db_item.description, args=(db_item.id,))
scheduler.add_job_instance(job)
return True
except Exception as e:
P.logger.error(f'Exception:{str(e)}')
P.logger.error(traceback.format_exc())
return False
class ModelCommand(ModelBase):
__tablename__ = 'command_job'
__table_args__ = {'mysql_collate': 'utf8_general_ci'}
__bind_key__ = 'system'
id = db.Column(db.Integer, primary_key=True)
command = db.Column(db.String)
filepath = db.Column(db.String)
args = db.Column(db.String)
description = db.Column(db.String)
schedule_mode = db.Column(db.String) # none, startup, scheduler
schedule_auto_start = db.Column(db.Boolean) # 시작시 스케쥴링 등록
schedule_interval = db.Column(db.String) # 주기
def __init__(self, command):
self.args = ''
self.description = ''
self.schedule_mode = 'none'
self.schedule_auto_start = False
self.schedule_interval = ''
self.set_command(command)
def set_command(self, command):
self.command = command
tmp = command.split(' ')
for t in tmp:
for ext in ['.py', '.sh', '.bat']:
if t.endswith(ext):
self.filepath = t
break
@classmethod
def job_new(cls, command):
item = ModelCommand(command)
return item.save()
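    # Usage sketch (illustrative path only):
    #
    #     job = ModelCommand.job_new('LOAD /data/scripts/cleanup.py')
    #     # set_command() stores the detected .py/.sh/.bat path in job.filepath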
@classmethod
def job_list(cls):
try:
data = cls.get_list(by_dict=True)
for item in data:
item['scheduler_is_include'] = F.scheduler.is_include(f"command_{item['id']}")
item['scheduler_is_running'] = F.scheduler.is_running(f"command_{item['id']}")
                item['process'] = (SupportSubprocess.get_instance_by_call_id(f"command_{item['id']}") is not None)
return data
except Exception as e:
logger.error(f"Exception:{str(e)}")
logger.error(traceback.format_exc()) | PypiClean |
/D-Analyst-1.0.6.tar.gz/D-Analyst-1.0.6/main/analyst/managers/mesh_manager.py | from analyst import NavigationEventProcessor, InteractionManager, \
PaintManager, \
GridEventProcessor, scale_matrix, rotation_matrix, translation_matrix, \
MeshNavigationEventProcessor
from .default_manager import DefaultPaintManager, DefaultInteractionManager, \
DefaultBindings
from .plot_manager import PlotBindings
import numpy as np
def load_mesh(filename):
"""Load vertices and faces from a wavefront .obj file and generate
normals.
"""
data = np.genfromtxt(filename, dtype=[('type', np.character, 1),
('points', np.float32, 3)])
vertices = data['points'][data['type'] == 'v']
faces = (data['points'][data['type'] == 'f']-1).astype(np.uint32)
T = vertices[faces]
    N = np.cross(T[::, 1] - T[::, 0], T[::, 2] - T[::, 0])
L = np.sqrt(N[:,0]**2+N[:,1]**2+N[:,2]**2)
N /= L[:, np.newaxis]
normals = np.zeros(vertices.shape)
normals[faces[:,0]] += N
normals[faces[:,1]] += N
normals[faces[:,2]] += N
L = np.sqrt(normals[:,0]**2+normals[:,1]**2+normals[:,2]**2)
normals /= L[:, np.newaxis]
vmin, vmax = vertices.min(), vertices.max()
vertices = 2*(vertices-vmin)/(vmax-vmin) - 1
return vertices, normals, faces
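# Minimal usage sketch (assumes a local wavefront file such as "bunny.obj"):
#
#     vertices, normals, faces = load_mesh("bunny.obj")
#     # vertices are rescaled to [-1, 1], normals are per-vertex unit normals,
#     # faces is an (m, 3) uint32 index array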
class MeshInteractionManager(DefaultInteractionManager):
def initialize_default(self, constrain_navigation=None, momentum=None):
super(MeshInteractionManager, self).initialize_default()
self.add_processor(MeshNavigationEventProcessor, name='navigation')
self.add_processor(GridEventProcessor, name='grid')
class MeshPaintManager(DefaultPaintManager):
def initialize_default(self, *args, **kwargs):
super(MeshPaintManager, self).initialize_default(*args, **kwargs)
self.set_rendering_options(activate3D=True)
class MeshBindings(PlotBindings):
def initialize(self):
super(MeshBindings, self).initialize()
self.set_rotation_mouse()
self.set_rotation_keyboard()
def set_panning_mouse(self):
self.set('LeftClickMove', 'Pan',
param_getter=lambda p: (-4*p["mouse_position_diff"][0],
-4*p["mouse_position_diff"][1]))
def set_rotation_mouse(self):
self.set('MiddleClickMove', 'Rotation',
param_getter=lambda p: (3*p["mouse_position_diff"][0],
3*p["mouse_position_diff"][1]))
self.set('LeftClickMove', 'Rotation',
key_modifier='Control',
param_getter=lambda p: (3*p["mouse_position_diff"][0],
3*p["mouse_position_diff"][1]))
def set_rotation_keyboard(self):
"""Set zooming bindings with the keyboard."""
self.set('KeyPress', 'Rotation',
key='Left', key_modifier='Shift',
param_getter=lambda p: (-.25, 0))
self.set('KeyPress', 'Rotation',
key='Right', key_modifier='Shift',
param_getter=lambda p: (.25, 0))
self.set('KeyPress', 'Rotation',
key='Up', key_modifier='Shift',
param_getter=lambda p: (0, .25))
self.set('KeyPress', 'Rotation',
key='Down', key_modifier='Shift',
param_getter=lambda p: (0, -.25))
def set_zoombox_mouse(self):
"""Deactivate zoombox."""
pass
def set_zoombox_keyboard(self):
"""Deactivate zoombox."""
pass
def extend(self):
"""Set rotation interactions with mouse and keyboard."""
self.set_rotation_mouse()
self.set_rotation_keyboard() | PypiClean |
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/mmdet/models/dense_heads/atss_head.py | import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmcv.runner import force_fp32
from mmdet.core import (anchor_inside_flags, build_assigner, build_sampler,
images_to_levels, multi_apply, reduce_mean, unmap)
from ..builder import HEADS, build_loss
from .anchor_head import AnchorHead
@HEADS.register_module()
class ATSSHead(AnchorHead):
"""Bridging the Gap Between Anchor-based and Anchor-free Detection via
Adaptive Training Sample Selection.
    The ATSS head structure is similar to that of FCOS; however, ATSS uses anchor
    boxes and assigns labels by Adaptive Training Sample Selection instead of max IoU.
https://arxiv.org/abs/1912.02424
"""
def __init__(self,
num_classes,
in_channels,
pred_kernel_size=3,
stacked_convs=4,
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
reg_decoded_bbox=True,
loss_centerness=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='atss_cls',
std=0.01,
bias_prob=0.01)),
**kwargs):
self.pred_kernel_size = pred_kernel_size
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super(ATSSHead, self).__init__(
num_classes,
in_channels,
reg_decoded_bbox=reg_decoded_bbox,
init_cfg=init_cfg,
**kwargs)
self.sampling = False
if self.train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
# SSD sampling=False so use PseudoSampler
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.loss_centerness = build_loss(loss_centerness)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
pred_pad_size = self.pred_kernel_size // 2
self.atss_cls = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
self.pred_kernel_size,
padding=pred_pad_size)
self.atss_reg = nn.Conv2d(
self.feat_channels,
self.num_base_priors * 4,
self.pred_kernel_size,
padding=pred_pad_size)
self.atss_centerness = nn.Conv2d(
self.feat_channels,
self.num_base_priors * 1,
self.pred_kernel_size,
padding=pred_pad_size)
self.scales = nn.ModuleList(
[Scale(1.0) for _ in self.prior_generator.strides])
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
cls_scores (list[Tensor]): Classification scores for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * 4.
"""
return multi_apply(self.forward_single, feats, self.scales)
def forward_single(self, x, scale):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize
the bbox prediction.
Returns:
tuple:
cls_score (Tensor): Cls scores for a single scale level
the channels number is num_anchors * num_classes.
bbox_pred (Tensor): Box energies / deltas for a single scale
level, the channels number is num_anchors * 4.
centerness (Tensor): Centerness for a single scale level, the
channel number is (N, num_anchors * 1, H, W).
"""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.atss_cls(cls_feat)
# we just follow atss, not apply exp in bbox_pred
bbox_pred = scale(self.atss_reg(reg_feat)).float()
centerness = self.atss_centerness(reg_feat)
return cls_score, bbox_pred, centerness
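    # Usage sketch (illustrative): calling the head on FPN features, e.g.
    # ``cls_scores, bbox_preds, centernesses = self(feats)``, applies
    # forward_single to every level and returns one tensor per level with the
    # shapes documented above.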
def loss_single(self, anchors, cls_score, bbox_pred, centerness, labels,
label_weights, bbox_targets, num_total_samples):
"""Compute loss of a single scale level.
Args:
cls_score (Tensor): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W).
bbox_pred (Tensor): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W).
anchors (Tensor): Box reference for each scale level with shape
(N, num_total_anchors, 4).
labels (Tensor): Labels of each anchors with shape
(N, num_total_anchors).
label_weights (Tensor): Label weights of each anchor with shape
(N, num_total_anchors)
            bbox_targets (Tensor): BBox regression targets of each anchor
                with shape (N, num_total_anchors, 4).
            num_total_samples (int): Number of positive samples that is
                reduced over all GPUs.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
anchors = anchors.reshape(-1, 4)
cls_score = cls_score.permute(0, 2, 3, 1).reshape(
-1, self.cls_out_channels).contiguous()
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
centerness = centerness.permute(0, 2, 3, 1).reshape(-1)
bbox_targets = bbox_targets.reshape(-1, 4)
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
# classification loss
loss_cls = self.loss_cls(
cls_score, labels, label_weights, avg_factor=num_total_samples)
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = self.num_classes
pos_inds = ((labels >= 0)
& (labels < bg_class_ind)).nonzero().squeeze(1)
if len(pos_inds) > 0:
pos_bbox_targets = bbox_targets[pos_inds]
pos_bbox_pred = bbox_pred[pos_inds]
pos_anchors = anchors[pos_inds]
pos_centerness = centerness[pos_inds]
centerness_targets = self.centerness_target(
pos_anchors, pos_bbox_targets)
pos_decode_bbox_pred = self.bbox_coder.decode(
pos_anchors, pos_bbox_pred)
# regression loss
loss_bbox = self.loss_bbox(
pos_decode_bbox_pred,
pos_bbox_targets,
weight=centerness_targets,
avg_factor=1.0)
# centerness loss
loss_centerness = self.loss_centerness(
pos_centerness,
centerness_targets,
avg_factor=num_total_samples)
else:
loss_bbox = bbox_pred.sum() * 0
loss_centerness = centerness.sum() * 0
centerness_targets = bbox_targets.new_tensor(0.)
return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum()
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def loss(self,
cls_scores,
bbox_preds,
centernesses,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute losses of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
centernesses (list[Tensor]): Centerness for each scale
level with shape (N, num_anchors * 1, H, W)
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (list[Tensor] | None): specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.prior_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels)
if cls_reg_targets is None:
return None
(anchor_list, labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
num_total_samples = reduce_mean(
torch.tensor(num_total_pos, dtype=torch.float,
device=device)).item()
num_total_samples = max(num_total_samples, 1.0)
losses_cls, losses_bbox, loss_centerness,\
bbox_avg_factor = multi_apply(
self.loss_single,
anchor_list,
cls_scores,
bbox_preds,
centernesses,
labels_list,
label_weights_list,
bbox_targets_list,
num_total_samples=num_total_samples)
bbox_avg_factor = sum(bbox_avg_factor)
bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item()
losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))
return dict(
loss_cls=losses_cls,
loss_bbox=losses_bbox,
loss_centerness=loss_centerness)
def centerness_target(self, anchors, gts):
# only calculate pos centerness targets, otherwise there may be nan
anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
l_ = anchors_cx - gts[:, 0]
t_ = anchors_cy - gts[:, 1]
r_ = gts[:, 2] - anchors_cx
b_ = gts[:, 3] - anchors_cy
left_right = torch.stack([l_, r_], dim=1)
top_bottom = torch.stack([t_, b_], dim=1)
centerness = torch.sqrt(
(left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
(top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
assert not torch.isnan(centerness).any()
return centerness
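    # Note: this is the FCOS-style centerness target,
    # sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b))),
    # evaluated only for positive anchors so the denominators stay non-zero.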
def get_targets(self,
anchor_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
unmap_outputs=True):
"""Get targets for ATSS head.
This method is almost the same as `AnchorHead.get_targets()`. Besides
returning the targets as the parent method does, it also returns the
anchors as the first element of the returned tuple.
"""
num_imgs = len(img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
num_level_anchors_list = [num_level_anchors] * num_imgs
# concat all level anchors and flags to a single tensor
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
anchor_list[i] = torch.cat(anchor_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_anchors, all_labels, all_label_weights, all_bbox_targets,
all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(
self._get_target_single,
anchor_list,
valid_flag_list,
num_level_anchors_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
img_metas,
label_channels=label_channels,
unmap_outputs=unmap_outputs)
# no valid anchors
if any([labels is None for labels in all_labels]):
return None
# sampled anchors of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
# split targets to a list w.r.t. multiple levels
anchors_list = images_to_levels(all_anchors, num_level_anchors)
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights,
num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets,
num_level_anchors)
bbox_weights_list = images_to_levels(all_bbox_weights,
num_level_anchors)
return (anchors_list, labels_list, label_weights_list,
bbox_targets_list, bbox_weights_list, num_total_pos,
num_total_neg)
def _get_target_single(self,
flat_anchors,
valid_flags,
num_level_anchors,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
label_channels=1,
unmap_outputs=True):
"""Compute regression, classification targets for anchors in a single
image.
Args:
flat_anchors (Tensor): Multi-level anchors of the image, which are
concatenated into a single tensor of shape (num_anchors ,4)
valid_flags (Tensor): Multi level valid flags of the image,
which are concatenated into a single tensor of
shape (num_anchors,).
            num_level_anchors (Tensor): Number of anchors of each scale level.
gt_bboxes (Tensor): Ground truth bboxes of the image,
shape (num_gts, 4).
gt_bboxes_ignore (Tensor): Ground truth bboxes to be
ignored, shape (num_ignored_gts, 4).
gt_labels (Tensor): Ground truth labels of each box,
shape (num_gts,).
img_meta (dict): Meta info of the image.
label_channels (int): Channel of label.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple: N is the number of total anchors in the image.
labels (Tensor): Labels of all anchors in the image with shape
(N,).
label_weights (Tensor): Label weights of all anchor in the
image with shape (N,).
bbox_targets (Tensor): BBox targets of all anchors in the
image with shape (N, 4).
bbox_weights (Tensor): BBox weights of all anchors in the
image with shape (N, 4)
pos_inds (Tensor): Indices of positive anchor with shape
(num_pos,).
neg_inds (Tensor): Indices of negative anchor with shape
(num_neg,).
"""
inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
img_meta['img_shape'][:2],
self.train_cfg.allowed_border)
if not inside_flags.any():
return (None, ) * 7
# assign gt and sample anchors
anchors = flat_anchors[inside_flags, :]
num_level_anchors_inside = self.get_num_level_anchors_inside(
num_level_anchors, inside_flags)
assign_result = self.assigner.assign(anchors, num_level_anchors_inside,
gt_bboxes, gt_bboxes_ignore,
gt_labels)
sampling_result = self.sampler.sample(assign_result, anchors,
gt_bboxes)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
labels = anchors.new_full((num_valid_anchors, ),
self.num_classes,
dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
if self.reg_decoded_bbox:
pos_bbox_targets = sampling_result.pos_gt_bboxes
else:
pos_bbox_targets = self.bbox_coder.encode(
sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
if gt_labels is None:
# Only rpn gives gt_labels as None
# Foreground is the first class since v2.5.0
labels[pos_inds] = 0
else:
labels[pos_inds] = gt_labels[
sampling_result.pos_assigned_gt_inds]
if self.train_cfg.pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg.pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
anchors = unmap(anchors, num_total_anchors, inside_flags)
labels = unmap(
labels, num_total_anchors, inside_flags, fill=self.num_classes)
label_weights = unmap(label_weights, num_total_anchors,
inside_flags)
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
return (anchors, labels, label_weights, bbox_targets, bbox_weights,
pos_inds, neg_inds)
def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
split_inside_flags = torch.split(inside_flags, num_level_anchors)
num_level_anchors_inside = [
int(flags.sum()) for flags in split_inside_flags
]
return num_level_anchors_inside | PypiClean |
/Lmgeo-1.1.0.tar.gz/Lmgeo-1.1.0/lmgeo/formats/tiletiffraster.py | from __future__ import division
from .const import constants as const
from .gridenvelope2d import GridEnvelope2D;
from .basetiffraster import BaseTiffRaster
from libtiff import TIFF, libtiff
import numpy as np
import os
from math import floor, ceil, sqrt
__author__ = "Steven B. Hoek"
class TileTiffRaster(BaseTiffRaster, GridEnvelope2D):
'''
A raster represented by 2 files, with extensions 'tif' and 'tfw'
This class can deal with tiff files of which the data are stored in tiles
Different layers - e.g. RGB - are as planes: contiguously = chunky = interleaved
or separately = per channel. More info: http://www.fileformat.info/format/tiff/egff.htm
It means that the number of planes and the planar configuration determines the shape
of the array written as bitmapped data, with dimensions image_depth, image_height,
image_width and samples. E.g. in the case of rgb and contiguous configuration, the last
dimension of the array is expected to be 3 and the field samples per pixel will also be 3
'''
# Private attributes
_const = None
__mode = 'r'
__datatype = const.FLOAT;
currow = -1;
__envelope = None;
__image = None; # contains current strip
__bits_per_sample = 8
__sample_format = 1
__samples_per_pixel = 1
__numpy_type = np.uint8
__itemsize = 1
__layer_size = 1
__tile_width = 1
__tile_length = 1
__ntiles = 1
__nstrips = 1
def __init__(self, filepath, *datatype):
# Initialise
BaseTiffRaster.__init__(filepath)
GridEnvelope2D.__init__(self, 1, 1, 0.0, 0.0, 0.1, 0.1)
if self._const == None:
raise AttributeError("TIFF raster not properly initialised!")
# Finally set the datatype
if len(datatype) > 0:
if (datatype[0] == const.INTEGER):
self.__datatype = const.INTEGER;
else:
self.__datatype = const.FLOAT;
def __get_sample_format(self, arr):
result = None
# Not considered: SAMPLEFORMAT_VOID=4 and SAMPLEFORMAT_COMPLEXINT=5
if arr.dtype in np.sctypes['float']:
result = 3 #SAMPLEFORMAT_IEEEFP
        elif arr.dtype in np.sctypes['uint']+[np.bool_]:
result = 1 #SAMPLEFORMAT_UINT
elif arr.dtype in np.sctypes['int']:
result = 2 #SAMPLEFORMAT_INT
elif arr.dtype in np.sctypes['complex']:
result = 6 #SAMPLEFORMAT_COMPLEXIEEEFP
else:
raise NotImplementedError(arr.dtype)
return result
def set_numpy_type(self, atype):
self.__numpy_type = atype
def get_numpy_type(self):
return self.datafile.get_numpy_type(self.__bits_per_sample, self.__sample_format)
def open(self, mode, ncols=1, nrows=1, xll=0, yll=0, cellsize=100, nodatavalue=-9999.0, byteorder='II', compression=1):
# Initialise
super(TileTiffRaster, self).open(mode);
# If file does not exist and mode[0] = 'w', create it!
if (mode[0] == 'w'):
# Initialise
self.__mode = mode
self.datafile = TIFF.open(os.path.join(self.folder, self.name), mode='w');
# If dx and dy have been set to different values already, make sure those are written to disk
if abs(self.dx - self.dy) < const.epsilon:
self.__envelope = GridEnvelope2D.__init__(self, ncols, nrows, xll, yll, cellsize, cellsize)
else:
self.__envelope = GridEnvelope2D.__init__(self, ncols, nrows, xll, yll, self.dx, self.dy)
# Set the fields
self.datafile.SetField(self._const.IMAGE_WIDTH, ncols)
self.datafile.SetField(self._const.IMAGE_LENGTH, nrows)
self.datafile.SetField(self._const.BITS_PER_SAMPLE, self.__bits_per_sample)
self.datafile.SetField(self._const.SAMPLE_PER_PIXEL, self.__samples_per_pixel)
super(TileTiffRaster, self).set_extra_tags()
# Data are organised into square tiles. Let each tile be about 8K bytes
bits_per_pixel = self.__samples_per_pixel * self.__bits_per_sample
pixels_per_tile = max(int(floor(8 * 8000 / bits_per_pixel)), 1)
self.__tile_width = floor(sqrt(pixels_per_tile))
self.__tile_length = self.__tile_width
self.__ntiles = int(ceil(ncols / self.__tile_width) * ceil(nrows / self.__tile_length))
            self.__nstrips = int(self.__ntiles / ceil(self.ncols / self.__tile_width))
self.datafile.SetField(b"TileWidth", self.__tile_width)
self.datafile.SetField(b"TileLength", self.__tile_length)
self.datafile.SetField(self._const.PLANAR_CONFIG, 1) # contiguous
self.datafile.SetField(self._const.ORIENTATION, 1) # top left
self.datafile.SetField(self._const.PAGE_NUMBER, 1, 1)
self.datafile.SetField(self._const.FILL_ORDER, 1) # MSB2LSB
self.datafile.SetField(self._const.COMPRESSION, compression)
self.writeheader()
shape = (self.__tile_length * self.ncols, self.__samples_per_pixel)
self.__image = np.zeros(shape, self.__numpy_type)
return True;
else:
# Open the file as well as the header file
if self.file_exists:
self.datafile = TIFF.open(os.path.join(self.folder, self.name), mode='r');
self.readheader();
# Check whether found values warrant further execution
self.ncols = int(self.datafile.GetField(self._const.IMAGE_WIDTH))
self.nrows = int(self.datafile.GetField(self._const.IMAGE_LENGTH))
self.__tile_width = int(self.datafile.GetField("TileWidth"))
self.__tile_length = int(self.datafile.GetField("TileLength"))
self.__ntiles = libtiff.TIFFNumberOfTiles(self.datafile).value
# Tiles can be joined to form strips: ceil(ncols / tile_width) in number, with height = tile_length
# Those strips can be joined to form the image: ceil(nrows / tile_length) in number
msg = "Number of tiles not in accordance with tile and image dimensions!"
self.__nstrips = int(ceil(self.nrows / self.__tile_length))
num_tiles_per_strip = int(ceil(self.ncols / self.__tile_width))
assert self.__ntiles == self.__nstrips * num_tiles_per_strip, msg
planar_config = self.datafile.GetField(self._const.PLANAR_CONFIG)
if (planar_config > 1):
raise NotImplementedError("Not yet able to deal with data organised in separate planes")
if self.datafile.GetField(self._const.GDAL_NODATA) != None:
if self.__datatype == const.INTEGER:
self.nodatavalue = int(self.datafile.GetField(self._const.GDAL_NODATA))
else:
self.nodatavalue = float(self.datafile.GetField(self._const.GDAL_NODATA))
super(TileTiffRaster, self).get_extra_tags()
# Process further information from the header file
self.xll = self.xul;
if self.ycoords_sort == const.DESC:
self.yll = self.yul - self.nrows * self.dy;
else:
self.yll = self.yul + self.nrows * self.dy;
# Prepare to read the file (strip by strip and under the hood tile by tile)
self.__bits_per_sample = self.datafile.GetField(self._const.BITS_PER_SAMPLE)
self.__sample_format = self.datafile.GetField(self._const.SAMPLE_FORMAT)
self.__samples_per_pixel = self.datafile.GetField(self._const.SAMPLE_PER_PIXEL)
self.__numpy_type = self.datafile.get_numpy_type(self.__bits_per_sample, self.__sample_format)
self.__itemsize = self.__bits_per_sample / 8
shape = (self.__tile_length * self.ncols, self.__samples_per_pixel)
self.__image = np.zeros(shape, self.__numpy_type)
return True;
else: return False;
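    # Usage sketch (illustrative; "elevation.tif" is a hypothetical file and
    # row iteration details depend on the BaseTiffRaster/Raster base classes):
    #
    #     r = TileTiffRaster("elevation.tif")
    #     if r.open('r'):
    #         row = r.next()   # first row of pixel values
    #         r.close()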
def get_tag(self, name):
return self.datafile.get_tag_name(name)
def next(self, parseLine=True):
# Is it possible to proceed? Otherwise generate StopIteration
result = None;
self.currow += 1;
try:
if (self.currow >= self.nrows): raise StopIteration;
# Read a new strip when necessary
row_in_strip = self.currow % self.__tile_length # also zero-based!
curstrip = int(floor(self.currow / self.__tile_length))
if curstrip >= self.__nstrips: raise StopIteration;
if row_in_strip == 0:
                # Are we dealing with one plane or with more? What configuration?
# self.datafile.GetField("PlanarConfig", 1))
if curstrip == self.__nstrips-1:
# Last strip
length = self.nrows - self.__tile_length * curstrip
self.__layer_size = (self.ncols) * length * (self.__samples_per_pixel) * (self.__itemsize)
self.__image = np.zeros((length, self.ncols, self.__samples_per_pixel), dtype=self.__numpy_type)
#.resize((last_length, self.ncols, self.__samples_per_pixel))
else:
length = self.__tile_length
self.__layer_size = (self.ncols) * length * (self.__samples_per_pixel) * (self.__itemsize)
# Before trying to read, reset the buffer
self.__image.fill(0.0)
self.__ReadStrip(curstrip, self.__image, int(self.__layer_size))
self.__image = self.__image.reshape(length, self.ncols, self.__samples_per_pixel)
# Read the next row
result = self.__image[row_in_strip, :, 0]
return result
except StopIteration:
raise StopIteration;
except Exception as e:
print(str(e))
def writenext(self, sequence_with_data):
raise NotImplementedError("Not implemented yet")
def reset(self):
self.currow = -1;
def __ReadStrip(self, strip, buf, size):
result = False
try:
num_tiles_per_strip = int(ceil(self.ncols / self.__tile_width))
numpy_type = self.datafile.get_numpy_type(self.__bits_per_sample, self.__sample_format)
if (strip == self.__nstrips-1):
length = self.nrows - (strip*self.__tile_length)
else:
length = self.__tile_length
buf = buf.reshape(length, self.ncols, self.__samples_per_pixel)
for k in range(num_tiles_per_strip):
if (k == num_tiles_per_strip-1):
# We only need part of the tile because we are on the edge
width = self.ncols - (num_tiles_per_strip-1)*self.__tile_width
else:
width = self.__tile_width
tmp_buf = np.ascontiguousarray(np.zeros((self.__tile_length, self.__tile_width), numpy_type))
seq = libtiff.TIFFReadTile(self.datafile, tmp_buf.ctypes.data, k*self.__tile_width, strip*self.__tile_length, 0, 0)
                if seq is not None:
start = k*self.__tile_width
buf[0:length, start:start+width, 0] = tmp_buf[0:length, 0:width]
result = True
except Exception as e:
print(str(e))
finally:
return result
def __WriteStrip(self,strip, buf, size):
raise NotImplementedError("Not implemented yet")
def close(self):
if self.__mode[0] == 'w':
self.datafile.WriteDirectory()
super(TileTiffRaster, self).close() | PypiClean |
/Newgram-0.0.5.tar.gz/Newgram-0.0.5/newgram/types/inline_mode/inline_query_result_article.py |
import newgram
from newgram import raw
from newgram import types
from .inline_query_result import InlineQueryResult
class InlineQueryResultArticle(InlineQueryResult):
"""Link to an article or web page.
Parameters:
title (``str``):
Title for the result.
input_message_content (:obj:`~newgram.types.InputMessageContent`):
Content of the message to be sent.
id (``str``, *optional*):
Unique identifier for this result, 1-64 bytes.
Defaults to a randomly generated UUID4.
url (``str``, *optional*):
URL of the result.
description (``str``, *optional*):
Short description of the result.
reply_markup (:obj:`~newgram.types.InlineKeyboardMarkup`, *optional*):
Inline keyboard attached to the message.
thumb_url (``str``, *optional*):
Url of the thumbnail for the result.
thumb_width (``int``, *optional*):
Thumbnail width.
thumb_height (``int``, *optional*):
            Thumbnail height.
"""
def __init__(
self,
title: str,
input_message_content: "types.InputMessageContent",
id: str = None,
url: str = None,
description: str = None,
reply_markup: "types.InlineKeyboardMarkup" = None,
thumb_url: str = None,
thumb_width: int = 0,
thumb_height: int = 0
):
super().__init__("article", id, input_message_content, reply_markup)
self.title = title
self.url = url
self.description = description
self.thumb_url = thumb_url
self.thumb_width = thumb_width
self.thumb_height = thumb_height
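    # Usage sketch (illustrative; assumes an InputTextMessageContent type is
    # available under newgram.types, mirroring Pyrogram's API):
    #
    #     result = InlineQueryResultArticle(
    #         title="Docs",
    #         input_message_content=types.InputTextMessageContent("See the docs"),
    #         url="https://example.org/docs",
    #         description="Link to the documentation",
    #     )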
async def write(self, client: "newgram.Client"):
return raw.types.InputBotInlineResult(
id=self.id,
type=self.type,
send_message=await self.input_message_content.write(client, self.reply_markup),
title=self.title,
description=self.description,
url=self.url,
thumb=raw.types.InputWebDocument(
url=self.thumb_url,
size=0,
mime_type="image/jpeg",
attributes=[
raw.types.DocumentAttributeImageSize(
w=self.thumb_width,
h=self.thumb_height
)
]
) if self.thumb_url else None
) | PypiClean |
/AircraftDR-0.1-py3-none-any.whl/ADR/Components/Plane.py | from ADR.Components.Aerodynamic_components.Wing import Wing
from ADR.Components.Aerodynamic_components.HS import HS
from ADR.Components.Propulsion.Motor import Motor
from ADR.Components.Points.CG import CG
from ADR.Components.Points.TPR import TPR
from ADR.Core.data_manipulation import dict_to_dataframe
import numpy as np
class Plane:
def __init__(self, data):
self.data = data
self.plane_type = data.get("plane_type")
wing1_data = {
"x": data.get("wing1_x"),
"y": data.get("wing1_y"),
"z": data.get("wing1_z"),
"airfoil_clmax": data.get("wing1_clmax_airfoil"),
"airfoil1_name": data.get("wing1_airfoil1_name"),
"airfoil2_name": data.get("wing1_airfoil2_name"),
"airfoil3_name": data.get("wing1_airfoil3_name"),
"span1": data.get("wing1_span1"),
"span2": data.get("wing1_span2"),
"chord1": data.get("wing1_chord1"),
"chord2": data.get("wing1_chord2"),
"chord3": data.get("wing1_chord3"),
"twist1": data.get("wing1_twist1"),
"twist2": data.get("wing1_twist2"),
"twist3": data.get("wing1_twist3"),
"incidence": data.get("wing1_incidence"),
"CM_ca": data.get("wing1_CM_ca"),
}
wing2_data = {
"x": data.get("wing2_x"),
"y": data.get("wing2_y"),
"z": data.get("wing2_z"),
"airfoil_clmax": data.get("wing2_clmax_airfoil"),
"airfoil1_name": data.get("wing2_airfoil1_name"),
"airfoil2_name": data.get("wing2_airfoil2_name"),
"airfoil3_name": data.get("wing2_airfoil3_name"),
"span1": data.get("wing2_span1"),
"span2": data.get("wing2_span2"),
"chord1": data.get("wing2_chord1"),
"chord2": data.get("wing2_chord2"),
"chord3": data.get("wing2_chord3"),
"twist1": data.get("wing2_twist1"),
"twist2": data.get("wing2_twist2"),
"twist3": data.get("wing2_twist3"),
"incidence": data.get("wing2_incidence"),
"CM_ca": data.get("wing2_CM_ca"),
}
hs_data = {
"x": data.get("hs_x"),
"y": data.get("hs_y"),
"z": data.get("hs_z"),
"airfoil_clmax": data.get("hs_clmax_airfoil"),
"airfoil1_name": data.get("hs_airfoil1_name"),
"airfoil2_name": data.get("hs_airfoil2_name"),
"airfoil3_name": data.get("hs_airfoil3_name"),
"span1": data.get("hs_span1"),
"span2": data.get("hs_span2"),
"chord1": data.get("hs_chord1"),
"chord2": data.get("hs_chord2"),
"chord3": data.get("hs_chord3"),
"twist1": data.get("hs_twist1"),
"twist2": data.get("hs_twist2"),
"twist3": data.get("hs_twist3"),
"incidence": data.get("hs_incidence"),
"CM_ca": data.get("hs_CM_ca"),
}
motor_data = {
"x": data.get("motor_x"),
"y": data.get("motor_y"),
"z": data.get("motor_z"),
"static_thrust": data.get("static_thrust"),
"linear_decay_coefficient": data.get("linear_decay_coefficient"),
}
cg_data = {"x": data.get("cg_x"), "z": data.get("cg_z")}
tpr_data = {"x": data.get("tpr_x"), "z": data.get("tpr_z")}
self.Iyy_TPR = data.get("Iyy_TPR")
self.CD_tp = data.get("CD_tp")
self.S_tp = data.get("S_tp")
self.CD_fus = data.get("CD_fus")
self.S_fus = data.get("S_fus")
self.u_k = data.get("u_k")
self.wing1 = Wing(wing1_data)
self.wing2 = Wing(wing2_data)
self.hs = HS(hs_data)
# self.vs = VS(vs_data)
self.motor = Motor(motor_data)
self.cg = CG(cg_data)
self.tpr = TPR(tpr_data)
self.V_stall = 0
self.V_min = 0
self.V_max = 0
self.V_takeoff = 0
self.mtow = 5
self.alpha_min = 0
self.alpha_max = 0
self.alpha_trimm_min = 0
self.alpha_trimm_max = 0
self.tail_trimm = 0
self.SM_alpha = None
self.trimm_for_low_angles = False
self.trimm_for_high_angles = False
self.positive_sm_for_positive_alphas = False
self.dimensions_are_good = False
self.total_dimensions = 0
self.dead_weight = 0
self.payload = 0
self.score = None
self.dead = False
self.get_CL_alpha_plane()
self.get_CD_alpha_plane()
self.set_alpha_range()
self.hs.set_incidence_range(self.stall_min, self.stall_max)
def __str__(self):
return self.__class__.__name__
def set_alpha_range(self):
wings_stall_min = max(self.wing1.stall_min, self.wing2.stall_min)
wings_stall_max = min(self.wing1.stall_max, self.wing2.stall_max)
# incidence_min = min(self.wing1.incidence, self.wing2.incidence)
# incidence_max = max(self.wing1.incidence, self.wing2.incidence)
# TODO: Incidence for now is fixed on 0 and should be better implemented
self.stall_min = wings_stall_min
self.stall_max = wings_stall_max
self.alpha_range = np.arange(self.stall_min, self.stall_max + 1)
def set_alpha_trimmed(self, alpha_airplane):
self.wing1.update_alpha(alpha_airplane)
if self.plane_type == "biplane":
self.wing2.update_alpha(alpha_airplane)
hs_incidence = np.interp(
alpha_airplane,
self.tail_trimm.index.values,
self.tail_trimm["hs_incidence"],
)
self.hs.incidence = hs_incidence
self.hs.update_alpha(alpha_airplane)
def get_CL_alpha_plane(self):
CL_alpha_plane = {}
for alpha in np.arange(-10, 21, 1.0):
numerator = (
self.wing1.get_CL(alpha) * self.wing1.area
- self.hs.get_CL(alpha) * self.hs.area
)
if self.plane_type == "biplane":
numerator += self.wing2.get_CL(alpha) * self.wing2.area
CL_alpha_plane[alpha] = numerator / self.wing1.area
self.CL_alpha = dict_to_dataframe(CL_alpha_plane, "CL", "alpha")
return self.CL_alpha
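    # The composite coefficient above is area-weighted and normalised by the
    # main wing area: CL_plane = (CL_w1*S_w1 [+ CL_w2*S_w2] - CL_hs*S_hs) / S_w1,
    # with the horizontal stabiliser contribution subtracted.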
def get_CD_alpha_plane(self):
CD_alpha_plane = {}
for alpha in np.arange(-10, 21, 1.0):
numerator = (
self.wing1.get_CD(alpha) * self.wing1.area
- self.hs.get_CD(alpha) * self.hs.area
)
if self.plane_type == "biplane":
numerator += self.wing2.get_CD(alpha) * self.wing2.area
CD_alpha_plane[alpha] = numerator / self.wing1.area
self.CD_alpha = dict_to_dataframe(CD_alpha_plane, "CD", "alpha")
return self.CD_alpha
def get_V_stall(self, rho):
self.CL_max = self.CL_alpha.max()[0]
self.V_stall = (
(2 * self.mtow * 9.81) / (rho * self.wing1.area * self.CL_max)
) ** 0.5
return self.V_stall
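    # get_V_stall above rearranges L = W at CL_max, i.e.
    # V_stall = sqrt(2 * m * g / (rho * S_ref * CL_max)) with wing1.area as the
    # reference area S_ref, matching the expression coded in the method.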
def get_V_CLmin(self, rho):
self.CL_min = self.CL_alpha.min()[0]
self.V_CLmin = (
(2 * self.mtow * 9.81) / (rho * self.wing1.area * self.CL_min)
) ** 0.5
return self.V_CLmin
def show_plane(self):
pass
# print('-------------------------------------------------------------')
# print("\nPlane components:\n")
# print("\t--- ", self.wing1, " ---")
# "x": data.get("wing1_x"),
# "y": data.get("wing1_y"),
# "z": data.get("wing1_z"),
# "airfoil_clmax": data.get("wing1_clmax_airfoil"),
# "airfoil1_name": data.get("wing1_airfoil1_name"),
# "airfoil2_name": data.get("wing1_airfoil2_name"),
# "airfoil3_name": data.get("wing1_airfoil3_name"),
# "span1": data.get("wing1_span1"),
# "span2": data.get("wing1_span2"),
# "chord1": data.get("wing1_chord1"),
# "chord2": data.get("wing1_chord2"),
# "chord3": data.get("wing1_chord3"),
# "twist1": data.get("wing1_twist1"),
# "twist2": data.get("wing1_twist2"),
# "twist3": data.get("wing1_twist3"),
# "incidence": data.get("wing1_incidence"),
# "CM_ca": data.get("wing1_CM_ca"),
# print()
# print("\t--- ", self.wing2, " ---")
# "x": data.get("wing2_x"),
# "y": data.get("wing2_y"),
# "z": data.get("wing2_z"),
# "airfoil_clmax": data.get("wing2_clmax_airfoil"),
# "airfoil1_name": data.get("wing2_airfoil1_name"),
# "airfoil2_name": data.get("wing2_airfoil2_name"),
# "airfoil3_name": data.get("wing2_airfoil3_name"),
# "span1": data.get("wing2_span1"),
# "span2": data.get("wing2_span2"),
# "chord1": data.get("wing2_chord1"),
# "chord2": data.get("wing2_chord2"),
# "chord3": data.get("wing2_chord3"),
# "twist1": data.get("wing2_twist1"),
# "twist2": data.get("wing2_twist2"),
# "twist3": data.get("wing2_twist3"),
# "incidence": data.get("wing2_incidence"),
# "CM_ca": data.get("wing2_CM_ca"),
# print()
# print("\t--- ", self.hs, " ---")
# "x": data.get("hs_x"),
# "y": data.get("hs_y"),
# "z": data.get("hs_z"),
# "airfoil_clmax": data.get("hs_clmax_airfoil"),
# "airfoil1_name": data.get("hs_airfoil1_name"),
# "airfoil2_name": data.get("hs_airfoil2_name"),
# "airfoil3_name": data.get("hs_airfoil3_name"),
# "span1": data.get("hs_span1"),
# "span2": data.get("hs_span2"),
# "chord1": data.get("hs_chord1"),
# "chord2": data.get("hs_chord2"),
# "chord3": data.get("hs_chord3"),
# "twist1": data.get("hs_twist1"),
# "twist2": data.get("hs_twist2"),
# "twist3": data.get("hs_twist3"),
# "incidence": data.get("hs_incidence"),
# "CM_ca": data.get("hs_CM_ca"),
# print()
# "x": data.get("motor_x"),
# "y": data.get("motor_y"),
# "z": data.get("motor_z"),
# "static_thrust": data.get("static_thrust"),
# "linear_decay_coefficient": data.get("linear_decay_coefficient")
# print()
# "x": data.get("cg_x"),
# "z": data.get("cg_z")
# print()
# "x": data.get("tpr_x"),
# "z": data.get("tpr_z")
# print()
# print('-------------------------------------------------------------') | PypiClean |
/OOoPy-2.0.tar.gz/OOoPy-2.0/ooopy/Transformer.py |
from __future__ import absolute_import, print_function, unicode_literals
import time
import re
try :
from xml.etree.ElementTree import dump, SubElement, Element, tostring
from xml.etree.ElementTree import _namespace_map
except ImportError :
from elementtree.ElementTree import dump, SubElement, Element, tostring
from elementtree.ElementTree import _namespace_map
from copy import deepcopy
from ooopy.autosuper import autosuper
from ooopy.Version import VERSION
from ooopy.OOoPy import OOoPy, files, mimetypes, namespace_by_name
def OOo_Tag (namespace, name, mimetype) :
"""Return combined XML tag
>>> print (OOo_Tag ('xml', 'id', mimetypes [1]))
{http://www.w3.org/XML/1998/namespace}id
>>> print (OOo_Tag ('text', 'list', mimetypes [1]))
{urn:oasis:names:tc:opendocument:xmlns:text:1.0}list
"""
return "{%s}%s" % (namespace_by_name [mimetype][namespace], name)
# end def OOo_Tag
def split_tag (tag) :
""" Split tag into symbolic namespace and name part -- inverse
operation of OOo_Tag.
"""
ns, t = tag.split ('}')
return (_namespace_map [ns [1:]], t)
# end def split_tag
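# split_tag is the inverse of OOo_Tag, e.g. (illustrative)
# split_tag ('{http://www.w3.org/XML/1998/namespace}id') == ('xml', 'id').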
class Transform (autosuper) :
"""
Base class for individual transforms on OOo files. An individual
transform needs a filename variable for specifying the OOo file
the transform should be applied to and an optional prio.
Individual transforms are applied according to their prio
setting, higher prio means later application of a transform.
The filename variable must specify one of the XML files which are
part of the OOo document (see files variable above). As
the names imply, content.xml contains the contents of the
document (text and ad-hoc style definitions), styles.xml contains
the style definitions, meta.xml contains meta information like
author, editing time, etc. and settings.xml is used to store
OOo's settings (menu Tools->Configure).
"""
prio = 100
textbody_names = \
{ mimetypes [0] : 'body'
, mimetypes [1] : 'text'
}
paragraph_props = \
{ mimetypes [0] : 'properties'
, mimetypes [1] : 'paragraph-properties'
}
font_decls = \
{ mimetypes [0] : 'font-decls'
, mimetypes [1] : 'font-face-decls'
}
def __init__ (self, prio = None, transformer = None) :
if prio is not None :
self.prio = prio
self.transformer = None
if transformer :
self.register (transformer)
# end def __init__
def apply (self, root) :
""" Apply myself to the element given as root """
raise NotImplementedError ('derived transforms must implement "apply"')
# end def apply
def apply_all (self, trees) :
""" Apply myself to all the files given in trees. The variable
trees contains a dictionary of ElementTree indexed by the
name of the OOo File.
The standard case is that only one file (namely
self.filename) is used.
"""
assert (self.filename)
self.apply (trees [self.filename].getroot ())
# end def apply_all
def find_tbody (self, root) :
""" Find the node which really contains the text -- different
for different OOo versions.
"""
tbody = root
if tbody.tag != self.textbody_tag :
tbody = tbody.find ('.//' + self.textbody_tag)
return tbody
# end def find_tbody
def register (self, transformer) :
""" Registering with a transformer means being able to access
variables stored in the tranformer by other transforms.
Also needed for tag-computation: The transformer knows which
version of OOo document we are processing.
"""
self.transformer = transformer
mt = self.mimetype = transformer.mimetype
self.textbody_name = self.textbody_names [mt]
self.paragraph_props = self.paragraph_props [mt]
self.properties_tag = self.oootag ('style', self.paragraph_props)
self.textbody_tag = self.oootag ('office', self.textbody_name)
self.font_decls_tag = self.oootag ('office', self.font_decls [mt])
# end def register
def oootag (self, namespace, name) :
""" Compute long tag version """
return OOo_Tag (namespace, name, self.mimetype)
# end def oootag
def set (self, variable, value) :
""" Set variable in our transformer using naming convention. """
self.transformer [self._varname (variable)] = value
# end def set
def _varname (self, name) :
""" For fulfilling the naming convention of the transformer
dictionary (every entry in this dictionary should be prefixed
with the class name of the transform) we have this
convenience method.
Returns variable name prefixed with own class name.
"""
return ":".join ((self.__class__.__name__, name))
# end def _varname
# end class Transform
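# A minimal custom transform only needs a filename and an apply method --
# illustrative sketch (not part of the library):
#
#     class Null_Transform (Transform) :
#         """ Touches content.xml but changes nothing. """
#         filename = 'content.xml'
#         prio     = 50
#
#         def apply (self, root) :
#             pass
#         # end def apply
#     # end class Null_Transform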
class Transformer (autosuper) :
"""
Class for applying a set of transforms to a given ooopy object.
The transforms are applied to the specified file in priority
order. When applying transforms we have a mechanism for
communication of transforms. We give the transformer to the
individual transforms as a parameter. The transforms may use the
transformer like a dictionary for storing values and retrieving
values left by previous transforms.
As a naming convention each transform should use its class name
as a prefix for storing values in the dictionary.
>>> import Transforms
>>> from Transforms import renumber_all, get_meta, set_meta, meta_counts
>>> try :
... from io import BytesIO
... except ImportError :
... from StringIO import StringIO as BytesIO
>>> sio = BytesIO ()
>>> o = OOoPy (infile = 'testfiles/test.sxw', outfile = sio)
>>> m = o.mimetype
>>> c = o.read ('content.xml')
>>> body = c.find (OOo_Tag ('office', 'body', mimetype = m))
>>> body [-1].get (OOo_Tag ('text', 'style-name', mimetype = m))
'Standard'
>>> def cb (name) :
... r = { 'street' : 'Beispielstrasse 42'
... , 'firstname' : 'Hugo'
... , 'salutation' : 'Frau'
... }
... return r.get (name, None)
...
>>> p = get_meta (m)
>>> t = Transformer (m, p)
>>> t ['a'] = 'a'
>>> print (t ['a'])
a
>>> t.transform (o)
>>> p.set ('a', 'b')
>>> print (t ['Attribute_Access:a'])
b
>>> o = OOoPy (infile = 'testfiles/test.sxw', outfile = sio)
>>> t = Transformer (
... m
... , Transforms.Autoupdate ()
... , Transforms.Editinfo ()
... , Transforms.Field_Replace (prio = 99, replace = cb)
... , Transforms.Field_Replace
... ( replace =
... { 'salutation' : ''
... , 'firstname' : 'Erika'
... , 'lastname' : 'Musterfrau'
... , 'country' : 'D'
... , 'postalcode' : '00815'
... , 'city' : 'Niemandsdorf'
... }
... )
... , Transforms.Addpagebreak_Style ()
... , Transforms.Addpagebreak ()
... )
>>> t.transform (o)
>>> o.close ()
>>> ov = sio.getvalue ()
>>> f = open ("testout.sxw", "wb")
>>> dum = f.write (ov)
>>> f.close ()
>>> o = OOoPy (infile = sio)
>>> c = o.read ('content.xml')
>>> m = o.mimetype
>>> body = c.find (OOo_Tag ('office', 'body', mimetype = m))
>>> vset = './/' + OOo_Tag ('text', 'variable-set', mimetype = m)
>>> for node in body.findall (vset) :
... name = node.get (OOo_Tag ('text', 'name', m))
... print (name, ':', node.text)
salutation : None
firstname : Erika
lastname : Musterfrau
street : Beispielstrasse 42
country : D
postalcode : 00815
city : Niemandsdorf
salutation : None
firstname : Erika
lastname : Musterfrau
street : Beispielstrasse 42
country : D
postalcode : 00815
city : Niemandsdorf
>>> body [-1].get (OOo_Tag ('text', 'style-name', mimetype = m))
'P2'
>>> sio = BytesIO ()
>>> o = OOoPy (infile = 'testfiles/test.sxw', outfile = sio)
>>> c = o.read ('content.xml')
>>> t = Transformer (
... o.mimetype
... , get_meta (o.mimetype)
... , Transforms.Addpagebreak_Style ()
... , Transforms.Mailmerge
... ( iterator =
... ( dict (firstname = 'Erika', lastname = 'Nobody')
... , dict (firstname = 'Eric', lastname = 'Wizard')
... , cb
... )
... )
... , renumber_all (o.mimetype)
... , set_meta (o.mimetype)
... , Transforms.Fix_OOo_Tag ()
... )
>>> t.transform (o)
>>> for i in meta_counts :
... print (i, t [':'.join (('Set_Attribute', i))])
character-count 951
image-count 0
object-count 0
page-count 3
paragraph-count 113
table-count 3
word-count 162
>>> name = t ['Addpagebreak_Style:stylename']
>>> print (name)
P2
>>> o.close ()
>>> ov = sio.getvalue ()
>>> f = open ("testout2.sxw", "wb")
>>> dum = f.write (ov)
>>> f.close ()
>>> o = OOoPy (infile = sio)
>>> m = o.mimetype
>>> c = o.read ('content.xml')
>>> body = c.find (OOo_Tag ('office', 'body', m))
>>> for n in body.findall ('.//*') :
... zidx = n.get (OOo_Tag ('draw', 'z-index', m))
... if zidx :
... print (':'.join(split_tag (n.tag)), zidx)
draw:text-box 0
draw:rect 1
draw:text-box 3
draw:rect 4
draw:text-box 6
draw:rect 7
draw:text-box 2
draw:text-box 5
draw:text-box 8
>>> for n in body.findall ('.//' + OOo_Tag ('text', 'p', m)) :
... if n.get (OOo_Tag ('text', 'style-name', m)) == name :
... print (n.tag)
{http://openoffice.org/2000/text}p
{http://openoffice.org/2000/text}p
>>> vset = './/' + OOo_Tag ('text', 'variable-set', m)
>>> for n in body.findall (vset) :
... if n.get (OOo_Tag ('text', 'name', m), None).endswith ('name') :
... name = n.get (OOo_Tag ('text', 'name', m))
... print (name, ':', n.text)
firstname : Erika
lastname : Nobody
firstname : Eric
lastname : Wizard
firstname : Hugo
lastname : Testman
firstname : Erika
lastname : Nobody
firstname : Eric
lastname : Wizard
firstname : Hugo
lastname : Testman
>>> for n in body.findall ('.//' + OOo_Tag ('draw', 'text-box', m)) :
... print (n.get (OOo_Tag ('draw', 'name', m)), end = ' ')
... print (n.get (OOo_Tag ('text', 'anchor-page-number', m)))
Frame1 1
Frame2 2
Frame3 3
Frame4 None
Frame5 None
Frame6 None
>>> for n in body.findall ('.//' + OOo_Tag ('text', 'section', m)) :
... print (n.get (OOo_Tag ('text', 'name', m)))
Section1
Section2
Section3
Section4
Section5
Section6
Section7
Section8
Section9
Section10
Section11
Section12
Section13
Section14
Section15
Section16
Section17
Section18
>>> for n in body.findall ('.//' + OOo_Tag ('table', 'table', m)) :
... print (n.get (OOo_Tag ('table', 'name', m)))
Table1
Table2
Table3
>>> r = o.read ('meta.xml')
>>> meta = r.find ('.//' + OOo_Tag ('meta', 'document-statistic', m))
>>> for i in meta_counts :
... print (i, repr (meta.get (OOo_Tag ('meta', i, m))))
character-count '951'
image-count '0'
object-count '0'
page-count '3'
paragraph-count '113'
table-count '3'
word-count '162'
>>> o.close ()
>>> sio = BytesIO ()
>>> o = OOoPy (infile = 'testfiles/test.sxw', outfile = sio)
>>> tf = ('testfiles/test.sxw', 'testfiles/rechng.sxw')
>>> t = Transformer (
... o.mimetype
... , get_meta (o.mimetype)
... , Transforms.Concatenate (*tf)
... , renumber_all (o.mimetype)
... , set_meta (o.mimetype)
... , Transforms.Fix_OOo_Tag ()
... )
>>> t.transform (o)
>>> for i in meta_counts :
... print (i, repr (t [':'.join (('Set_Attribute', i))]))
character-count '1131'
image-count '0'
object-count '0'
page-count '3'
paragraph-count '168'
table-count '2'
word-count '160'
>>> o.close ()
>>> ov = sio.getvalue ()
>>> f = open ("testout3.sxw", "wb")
>>> dum = f.write (ov)
>>> f.close ()
>>> o = OOoPy (infile = sio)
>>> m = o.mimetype
>>> c = o.read ('content.xml')
>>> s = o.read ('styles.xml')
>>> for n in c.findall ('./*/*') :
... name = n.get (OOo_Tag ('style', 'name', m))
... if name :
... parent = n.get (OOo_Tag ('style', 'parent-style-name', m))
... print ('"%s", "%s"' % (name, parent))
"Tahoma1", "None"
"Bitstream Vera Sans", "None"
"Tahoma", "None"
"Nimbus Roman No9 L", "None"
"Courier New", "None"
"Arial Black", "None"
"New Century Schoolbook", "None"
"Helvetica", "None"
"Table1", "None"
"Table1.A", "None"
"Table1.A1", "None"
"Table1.E1", "None"
"Table1.A2", "None"
"Table1.E2", "None"
"P1", "None"
"fr1", "Frame"
"fr2", "None"
"fr3", "Frame"
"Sect1", "None"
"gr1", "None"
"P2", "Standard"
"Standard_Concat", "None"
"Concat_P1", "Concat_Frame contents"
"Concat_P2", "Concat_Frame contents"
"P3", "Concat_Frame contents"
"P4", "Concat_Frame contents"
"P5", "Concat_Standard"
"P6", "Concat_Standard"
"P7", "Concat_Frame contents"
"P8", "Concat_Frame contents"
"P9", "Concat_Frame contents"
"P10", "Concat_Frame contents"
"P11", "Concat_Frame contents"
"P12", "Concat_Frame contents"
"P13", "Concat_Frame contents"
"P15", "Concat_Standard"
"P16", "Concat_Standard"
"P17", "Concat_Standard"
"P18", "Concat_Standard"
"P19", "Concat_Standard"
"P20", "Concat_Standard"
"P21", "Concat_Standard"
"P22", "Concat_Standard"
"P23", "Concat_Standard"
"T1", "None"
"Concat_fr1", "Concat_Frame"
"Concat_fr2", "Concat_Frame"
"Concat_fr3", "Concat_Frame"
"fr4", "Concat_Frame"
"fr5", "Concat_Frame"
"fr6", "Concat_Frame"
"Concat_Sect1", "None"
"N0", "None"
"N2", "None"
"P15_Concat", "Concat_Standard"
>>> for n in s.findall ('./*/*') :
... name = n.get (OOo_Tag ('style', 'name', m))
... if name :
... parent = n.get (OOo_Tag ('style', 'parent-style-name', m))
... print ('"%s", "%s"' % (name, parent))
"Tahoma1", "None"
"Bitstream Vera Sans", "None"
"Tahoma", "None"
"Nimbus Roman No9 L", "None"
"Courier New", "None"
"Arial Black", "None"
"New Century Schoolbook", "None"
"Helvetica", "None"
"Standard", "None"
"Text body", "Standard"
"List", "Text body"
"Table Contents", "Text body"
"Table Heading", "Table Contents"
"Caption", "Standard"
"Frame contents", "Text body"
"Index", "Standard"
"Frame", "None"
"OLE", "None"
"Concat_Standard", "None"
"Concat_Text body", "Concat_Standard"
"Concat_List", "Concat_Text body"
"Concat_Caption", "Concat_Standard"
"Concat_Frame contents", "Concat_Text body"
"Concat_Index", "Concat_Standard"
"Horizontal Line", "Concat_Standard"
"Internet link", "None"
"Visited Internet Link", "None"
"Concat_Frame", "None"
"Concat_OLE", "None"
"pm1", "None"
"Concat_pm1", "None"
"Standard", "None"
"Concat_Standard", "None"
>>> for n in c.findall ('.//' + OOo_Tag ('text', 'variable-decl', m)) :
... name = n.get (OOo_Tag ('text', 'name', m))
... print (name)
salutation
firstname
lastname
street
country
postalcode
city
date
invoice.invoice_no
invoice.abo.aboprice.abotype.description
address.salutation
address.title
address.firstname
address.lastname
address.function
address.street
address.country
address.postalcode
address.city
invoice.subscriber.salutation
invoice.subscriber.title
invoice.subscriber.firstname
invoice.subscriber.lastname
invoice.subscriber.function
invoice.subscriber.street
invoice.subscriber.country
invoice.subscriber.postalcode
invoice.subscriber.city
invoice.period_start
invoice.period_end
invoice.currency.name
invoice.amount
invoice.subscriber.initial
>>> for n in c.findall ('.//' + OOo_Tag ('text', 'sequence-decl', m)) :
... name = n.get (OOo_Tag ('text', 'name', m))
... print (name)
Illustration
Table
Text
Drawing
>>> for n in c.findall ('.//' + OOo_Tag ('text', 'p', m)) :
... name = n.get (OOo_Tag ('text', 'style-name', m))
... if not name or name.startswith ('Concat') :
... print (">%s<" % name)
>Concat_P1<
>Concat_P2<
>Concat_Frame contents<
>>> for n in c.findall ('.//' + OOo_Tag ('draw', 'text-box', m)) :
... attrs = 'name', 'style-name', 'z-index'
... attrs = [n.get (OOo_Tag ('draw', i, m)) for i in attrs]
... attrs.append (n.get (OOo_Tag ('text', 'anchor-page-number', m)))
... print (attrs)
['Frame1', 'fr1', '0', '1']
['Frame2', 'fr1', '3', '2']
['Frame3', 'Concat_fr1', '6', '3']
['Frame4', 'Concat_fr2', '7', '3']
['Frame5', 'Concat_fr3', '8', '3']
['Frame6', 'Concat_fr1', '9', '3']
['Frame7', 'fr4', '10', '3']
['Frame8', 'fr4', '11', '3']
['Frame9', 'fr4', '12', '3']
['Frame10', 'fr4', '13', '3']
['Frame11', 'fr4', '14', '3']
['Frame12', 'fr4', '15', '3']
['Frame13', 'fr5', '16', '3']
['Frame14', 'fr4', '18', '3']
['Frame15', 'fr4', '19', '3']
['Frame16', 'fr4', '20', '3']
['Frame17', 'fr6', '17', '3']
['Frame18', 'fr4', '23', '3']
['Frame19', 'fr3', '2', None]
['Frame20', 'fr3', '5', None]
>>> for n in c.findall ('.//' + OOo_Tag ('text', 'section', m)) :
... attrs = 'name', 'style-name'
... attrs = [n.get (OOo_Tag ('text', i, m)) for i in attrs]
... print (attrs)
['Section1', 'Sect1']
['Section2', 'Sect1']
['Section3', 'Sect1']
['Section4', 'Sect1']
['Section5', 'Sect1']
['Section6', 'Sect1']
['Section7', 'Concat_Sect1']
['Section8', 'Concat_Sect1']
['Section9', 'Concat_Sect1']
['Section10', 'Concat_Sect1']
['Section11', 'Concat_Sect1']
['Section12', 'Concat_Sect1']
['Section13', 'Concat_Sect1']
['Section14', 'Concat_Sect1']
['Section15', 'Concat_Sect1']
['Section16', 'Concat_Sect1']
['Section17', 'Concat_Sect1']
['Section18', 'Concat_Sect1']
['Section19', 'Concat_Sect1']
['Section20', 'Concat_Sect1']
['Section21', 'Concat_Sect1']
['Section22', 'Concat_Sect1']
['Section23', 'Concat_Sect1']
['Section24', 'Concat_Sect1']
['Section25', 'Concat_Sect1']
['Section26', 'Concat_Sect1']
['Section27', 'Concat_Sect1']
['Section28', 'Sect1']
['Section29', 'Sect1']
['Section30', 'Sect1']
['Section31', 'Sect1']
['Section32', 'Sect1']
['Section33', 'Sect1']
>>> for n in c.findall ('.//' + OOo_Tag ('draw', 'rect', m)) :
... attrs = 'style-name', 'text-style-name', 'z-index'
... attrs = [n.get (OOo_Tag ('draw', i, m)) for i in attrs]
... attrs.append (n.get (OOo_Tag ('text', 'anchor-page-number', m)))
... print (attrs)
['gr1', 'P1', '1', '1']
['gr1', 'P1', '4', '2']
>>> for n in c.findall ('.//' + OOo_Tag ('draw', 'line', m)) :
... attrs = 'style-name', 'text-style-name', 'z-index'
... attrs = [n.get (OOo_Tag ('draw', i, m)) for i in attrs]
... print (attrs)
['gr1', 'P1', '24']
['gr1', 'P1', '22']
['gr1', 'P1', '21']
>>> for n in s.findall ('.//' + OOo_Tag ('style', 'style', m)) :
... if n.get (OOo_Tag ('style', 'name', m)).startswith ('Co') :
... attrs = 'name', 'class', 'family'
... attrs = [n.get (OOo_Tag ('style', i, m)) for i in attrs]
... print (attrs)
... props = n.find ('./' + OOo_Tag ('style', 'properties', m))
... if props is not None and len (props) :
... props [0].tag
['Concat_Standard', 'text', 'paragraph']
'{http://openoffice.org/2000/style}tab-stops'
['Concat_Text body', 'text', 'paragraph']
['Concat_List', 'list', 'paragraph']
['Concat_Caption', 'extra', 'paragraph']
['Concat_Frame contents', 'extra', 'paragraph']
['Concat_Index', 'index', 'paragraph']
['Concat_Frame', None, 'graphics']
['Concat_OLE', None, 'graphics']
>>> for n in c.findall ('.//*') :
... zidx = n.get (OOo_Tag ('draw', 'z-index', m))
... if zidx :
... print (':'.join(split_tag (n.tag)), zidx)
draw:text-box 0
draw:rect 1
draw:text-box 3
draw:rect 4
draw:text-box 6
draw:text-box 7
draw:text-box 8
draw:text-box 9
draw:text-box 10
draw:text-box 11
draw:text-box 12
draw:text-box 13
draw:text-box 14
draw:text-box 15
draw:text-box 16
draw:text-box 18
draw:text-box 19
draw:text-box 20
draw:text-box 17
draw:text-box 23
draw:line 24
draw:text-box 2
draw:text-box 5
draw:line 22
draw:line 21
>>> sio = BytesIO ()
>>> o = OOoPy (infile = 'testfiles/carta.stw', outfile = sio)
>>> t = Transformer (
... o.mimetype
... , get_meta (o.mimetype)
... , Transforms.Addpagebreak_Style ()
... , Transforms.Mailmerge
... ( iterator =
... ( dict
... ( Spett = "Spettabile"
... , contraente = "First person"
... , indirizzo = "street? 1"
... , tipo = "racc. A.C."
... , luogo = "Varese"
... , oggetto = "Saluti"
... )
... , dict
... ( Spett = "Egregio"
... , contraente = "Second Person"
... , indirizzo = "street? 2"
... , tipo = "Raccomandata"
... , luogo = "Gavirate"
... , oggetto = "Ossequi"
... )
... )
... )
... , renumber_all (o.mimetype)
... , set_meta (o.mimetype)
... , Transforms.Fix_OOo_Tag ()
... )
>>> t.transform(o)
>>> o.close()
>>> ov = sio.getvalue ()
>>> f = open ("carta-out.stw", "wb")
>>> dum = f.write (ov)
>>> f.close ()
>>> o = OOoPy (infile = sio)
>>> m = o.mimetype
>>> c = o.read ('content.xml')
>>> body = c.find (OOo_Tag ('office', 'body', mimetype = m))
>>> vset = './/' + OOo_Tag ('text', 'variable-set', mimetype = m)
>>> for node in body.findall (vset) :
... name = node.get (OOo_Tag ('text', 'name', m))
... print (name, ':', node.text)
Spett : Spettabile
contraente : First person
indirizzo : street? 1
Spett : Egregio
contraente : Second Person
indirizzo : street? 2
tipo : racc. A.C.
luogo : Varese
oggetto : Saluti
tipo : Raccomandata
luogo : Gavirate
oggetto : Ossequi
>>> sio = BytesIO ()
>>> o = OOoPy (infile = 'testfiles/test.odt', outfile = sio)
>>> t = Transformer (
... o.mimetype
... , get_meta (o.mimetype)
... , Transforms.Addpagebreak_Style ()
... , Transforms.Mailmerge
... ( iterator =
... ( dict (firstname = 'Erika', lastname = 'Nobody')
... , dict (firstname = 'Eric', lastname = 'Wizard')
... , cb
... )
... )
... , renumber_all (o.mimetype)
... , set_meta (o.mimetype)
... , Transforms.Fix_OOo_Tag ()
... )
>>> t.transform (o)
>>> for i in meta_counts :
... print (i, t [':'.join (('Set_Attribute', i))])
character-count 951
image-count 0
object-count 0
page-count 3
paragraph-count 53
table-count 3
word-count 162
>>> name = t ['Addpagebreak_Style:stylename']
>>> print (name)
P2
>>> o.close ()
>>> ov = sio.getvalue ()
>>> f = open ("testout.odt", "wb")
>>> dum = f.write (ov)
>>> f.close ()
>>> o = OOoPy (infile = sio)
>>> m = o.mimetype
>>> c = o.read ('content.xml')
>>> body = c.find (OOo_Tag ('office', 'body', m))
>>> for n in body.findall ('.//*') :
... zidx = n.get (OOo_Tag ('draw', 'z-index', m))
... if zidx :
... print (':'.join(split_tag (n.tag)), zidx)
draw:frame 0
draw:rect 1
draw:frame 3
draw:rect 4
draw:frame 6
draw:rect 7
draw:frame 2
draw:frame 5
draw:frame 8
>>> for n in body.findall ('.//' + OOo_Tag ('text', 'p', m)) :
... if n.get (OOo_Tag ('text', 'style-name', m)) == name :
... print (n.tag)
{urn:oasis:names:tc:opendocument:xmlns:text:1.0}p
{urn:oasis:names:tc:opendocument:xmlns:text:1.0}p
>>> vset = './/' + OOo_Tag ('text', 'variable-set', m)
>>> for n in body.findall (vset) :
... if n.get (OOo_Tag ('text', 'name', m), None).endswith ('name') :
... name = n.get (OOo_Tag ('text', 'name', m))
... print (name, ':', n.text)
firstname : Erika
lastname : Nobody
firstname : Eric
lastname : Wizard
firstname : Hugo
lastname : Testman
firstname : Erika
lastname : Nobody
firstname : Eric
lastname : Wizard
firstname : Hugo
lastname : Testman
>>> for n in body.findall ('.//' + OOo_Tag ('draw', 'frame', m)) :
... print (n.get (OOo_Tag ('draw', 'name', m)), end = ' ')
... print (n.get (OOo_Tag ('text', 'anchor-page-number', m)))
Frame1 1
Frame2 2
Frame3 3
Frame4 None
Frame5 None
Frame6 None
>>> for n in body.findall ('.//' + OOo_Tag ('text', 'section', m)) :
... print (n.get (OOo_Tag ('text', 'name', m)))
Section1
Section2
Section3
Section4
Section5
Section6
Section7
Section8
Section9
Section10
Section11
Section12
Section13
Section14
Section15
Section16
Section17
Section18
>>> for n in body.findall ('.//' + OOo_Tag ('table', 'table', m)) :
... print (n.get (OOo_Tag ('table', 'name', m)))
Table1
Table2
Table3
>>> r = o.read ('meta.xml')
>>> meta = r.find ('.//' + OOo_Tag ('meta', 'document-statistic', m))
>>> for i in meta_counts :
... print (i, repr (meta.get (OOo_Tag ('meta', i, m))))
character-count '951'
image-count '0'
object-count '0'
page-count '3'
paragraph-count '53'
table-count '3'
word-count '162'
>>> o.close ()
>>> sio = BytesIO ()
>>> o = OOoPy (infile = 'testfiles/carta.odt', outfile = sio)
>>> t = Transformer (
... o.mimetype
... , get_meta (o.mimetype)
... , Transforms.Addpagebreak_Style ()
... , Transforms.Mailmerge
... ( iterator =
... ( dict
... ( Spett = "Spettabile"
... , contraente = "First person"
... , indirizzo = "street? 1"
... , tipo = "racc. A.C."
... , luogo = "Varese"
... , oggetto = "Saluti"
... )
... , dict
... ( Spett = "Egregio"
... , contraente = "Second Person"
... , indirizzo = "street? 2"
... , tipo = "Raccomandata"
... , luogo = "Gavirate"
... , oggetto = "Ossequi"
... )
... )
... )
... , renumber_all (o.mimetype)
... , set_meta (o.mimetype)
... , Transforms.Fix_OOo_Tag ()
... )
>>> t.transform(o)
>>> o.close()
>>> ov = sio.getvalue ()
>>> f = open ("carta-out.odt", "wb")
>>> dum = f.write (ov)
>>> f.close ()
>>> o = OOoPy (infile = sio)
>>> m = o.mimetype
>>> c = o.read ('content.xml')
>>> body = c.find (OOo_Tag ('office', 'body', mimetype = m))
>>> vset = './/' + OOo_Tag ('text', 'variable-set', mimetype = m)
>>> for node in body.findall (vset) :
... name = node.get (OOo_Tag ('text', 'name', m))
... print (name, ':', node.text)
Spett : Spettabile
contraente : First person
indirizzo : street? 1
Spett : Egregio
contraente : Second Person
indirizzo : street? 2
tipo : racc. A.C.
luogo : Varese
oggetto : Saluti
tipo : Raccomandata
luogo : Gavirate
oggetto : Ossequi
>>> sio = BytesIO ()
>>> o = OOoPy (infile = 'testfiles/test.odt', outfile = sio)
>>> tf = ('testfiles/test.odt', 'testfiles/rechng.odt')
>>> t = Transformer (
... o.mimetype
... , get_meta (o.mimetype)
... , Transforms.Concatenate (*tf)
... , renumber_all (o.mimetype)
... , set_meta (o.mimetype)
... , Transforms.Fix_OOo_Tag ()
... )
>>> t.transform (o)
>>> for i in meta_counts :
... print (i, repr (t [':'.join (('Set_Attribute', i))]))
character-count '1131'
image-count '0'
object-count '0'
page-count '3'
paragraph-count '80'
table-count '2'
word-count '159'
>>> o.close ()
>>> ov = sio.getvalue ()
>>> f = open ("testout3.odt", "wb")
>>> dum = f.write (ov)
>>> f.close ()
>>> o = OOoPy (infile = sio)
>>> m = o.mimetype
>>> c = o.read ('content.xml')
>>> s = o.read ('styles.xml')
>>> for n in c.findall ('./*/*') :
... name = n.get (OOo_Tag ('style', 'name', m))
... if name :
... parent = n.get (OOo_Tag ('style', 'parent-style-name', m))
... print ('"%s", "%s"' % (name, parent))
"Tahoma1", "None"
"Bitstream Vera Sans", "None"
"Tahoma", "None"
"Nimbus Roman No9 L", "None"
"Courier New", "None"
"Arial Black", "None"
"New Century Schoolbook", "None"
"Times New Roman", "None"
"Arial", "None"
"Helvetica", "None"
"Table1", "None"
"Table1.A", "None"
"Table1.A1", "None"
"Table1.E1", "None"
"Table1.A2", "None"
"Table1.E2", "None"
"P1", "None"
"fr1", "Frame"
"fr2", "Frame"
"Sect1", "None"
"gr1", "None"
"P2", "Standard"
"Standard_Concat", "None"
"Concat_P1", "Concat_Frame_20_contents"
"Concat_P2", "Concat_Frame_20_contents"
"P3", "Concat_Frame_20_contents"
"P4", "Concat_Standard"
"P5", "Concat_Standard"
"P6", "Concat_Frame_20_contents"
"P7", "Concat_Frame_20_contents"
"P8", "Concat_Frame_20_contents"
"P9", "Concat_Frame_20_contents"
"P10", "Concat_Frame_20_contents"
"P11", "Concat_Frame_20_contents"
"P12", "Concat_Frame_20_contents"
"P14", "Concat_Standard"
"P15", "Concat_Standard"
"P16", "Concat_Standard"
"P17", "Concat_Standard"
"P18", "Concat_Standard"
"P19", "Concat_Standard"
"P20", "Concat_Standard"
"P21", "Concat_Standard"
"P22", "Concat_Standard"
"P23", "Concat_Standard"
"Concat_fr1", "Frame"
"Concat_fr2", "Frame"
"fr3", "Frame"
"fr4", "Frame"
"fr5", "Frame"
"fr6", "Frame"
"Concat_gr1", "None"
"N0", "None"
"N2", "None"
"P14_Concat", "Concat_Standard"
>>> for n in c.findall ('.//' + OOo_Tag ('text', 'variable-decl', m)) :
... name = n.get (OOo_Tag ('text', 'name', m))
... print (name)
salutation
firstname
lastname
street
country
postalcode
city
date
invoice.invoice_no
invoice.abo.aboprice.abotype.description
address.salutation
address.title
address.firstname
address.lastname
address.function
address.street
address.country
address.postalcode
address.city
invoice.subscriber.salutation
invoice.subscriber.title
invoice.subscriber.firstname
invoice.subscriber.lastname
invoice.subscriber.function
invoice.subscriber.street
invoice.subscriber.country
invoice.subscriber.postalcode
invoice.subscriber.city
invoice.period_start
invoice.period_end
invoice.currency.name
invoice.amount
invoice.subscriber.initial
>>> for n in c.findall ('.//' + OOo_Tag ('text', 'sequence-decl', m)) :
... name = n.get (OOo_Tag ('text', 'name', m))
... print (name)
Illustration
Table
Text
Drawing
>>> for n in c.findall ('.//' + OOo_Tag ('text', 'p', m)) :
... name = n.get (OOo_Tag ('text', 'style-name', m))
... if not name or name.startswith ('Concat') :
... print (':'.join(split_tag (n.tag)), ">%s<" % name)
text:p >None<
text:p >None<
text:p >Concat_P1<
text:p >Concat_P1<
text:p >Concat_P2<
text:p >Concat_P2<
text:p >Concat_P2<
text:p >Concat_P2<
text:p >Concat_P2<
text:p >Concat_P2<
text:p >Concat_P2<
text:p >Concat_P2<
text:p >Concat_P2<
text:p >Concat_P2<
text:p >Concat_Frame_20_contents<
text:p >None<
text:p >None<
text:p >None<
>>> for n in c.findall ('.//' + OOo_Tag ('draw', 'frame', m)) :
... attrs = 'name', 'style-name', 'z-index'
... attrs = [n.get (OOo_Tag ('draw', i, m)) for i in attrs]
... attrs.append (n.get (OOo_Tag ('text', 'anchor-page-number', m)))
... print (attrs)
['Frame1', 'fr1', '0', '1']
['Frame2', 'fr1', '3', '2']
['Frame3', 'Concat_fr1', '6', '3']
['Frame4', 'Concat_fr2', '7', '3']
['Frame5', 'fr3', '8', '3']
['Frame6', 'Concat_fr1', '9', '3']
['Frame7', 'fr4', '10', '3']
['Frame8', 'fr4', '11', '3']
['Frame9', 'fr4', '12', '3']
['Frame10', 'fr4', '13', '3']
['Frame11', 'fr4', '14', '3']
['Frame12', 'fr4', '15', '3']
['Frame13', 'fr5', '16', '3']
['Frame14', 'fr4', '18', '3']
['Frame15', 'fr4', '19', '3']
['Frame16', 'fr4', '20', '3']
['Frame17', 'fr6', '17', '3']
['Frame18', 'fr4', '23', '3']
['Frame19', 'fr2', '2', None]
['Frame20', 'fr2', '5', None]
>>> for n in c.findall ('.//' + OOo_Tag ('text', 'section', m)) :
... attrs = 'name', 'style-name'
... attrs = [n.get (OOo_Tag ('text', i, m)) for i in attrs]
... print (attrs)
['Section1', 'Sect1']
['Section2', 'Sect1']
['Section3', 'Sect1']
['Section4', 'Sect1']
['Section5', 'Sect1']
['Section6', 'Sect1']
['Section7', 'Sect1']
['Section8', 'Sect1']
['Section9', 'Sect1']
['Section10', 'Sect1']
['Section11', 'Sect1']
['Section12', 'Sect1']
['Section13', 'Sect1']
['Section14', 'Sect1']
['Section15', 'Sect1']
['Section16', 'Sect1']
['Section17', 'Sect1']
['Section18', 'Sect1']
['Section19', 'Sect1']
['Section20', 'Sect1']
['Section21', 'Sect1']
['Section22', 'Sect1']
['Section23', 'Sect1']
['Section24', 'Sect1']
['Section25', 'Sect1']
['Section26', 'Sect1']
['Section27', 'Sect1']
['Section28', 'Sect1']
['Section29', 'Sect1']
['Section30', 'Sect1']
['Section31', 'Sect1']
['Section32', 'Sect1']
['Section33', 'Sect1']
>>> for n in c.findall ('.//' + OOo_Tag ('draw', 'rect', m)) :
... attrs = 'style-name', 'text-style-name', 'z-index'
... attrs = [n.get (OOo_Tag ('draw', i, m)) for i in attrs]
... attrs.append (n.get (OOo_Tag ('text', 'anchor-page-number', m)))
... print (attrs)
['gr1', 'P1', '1', '1']
['gr1', 'P1', '4', '2']
>>> for n in c.findall ('.//' + OOo_Tag ('draw', 'line', m)) :
... attrs = 'style-name', 'text-style-name', 'z-index'
... attrs = [n.get (OOo_Tag ('draw', i, m)) for i in attrs]
... print (attrs)
['Concat_gr1', 'P1', '24']
['Concat_gr1', 'P1', '22']
['Concat_gr1', 'P1', '21']
>>> for n in s.findall ('.//' + OOo_Tag ('style', 'style', m)) :
... if n.get (OOo_Tag ('style', 'name', m)).startswith ('Co') :
... attrs = 'name', 'display-name', 'class', 'family'
... attrs = [n.get (OOo_Tag ('style', i, m)) for i in attrs]
... print (attrs)
... props = n.find ('./' + OOo_Tag ('style', 'properties', m))
... if props is not None and len (props) :
... props [0].tag
['Concat_Standard', None, 'text', 'paragraph']
['Concat_Text_20_body', 'Concat Text body', 'text', 'paragraph']
['Concat_List', None, 'list', 'paragraph']
['Concat_Caption', None, 'extra', 'paragraph']
['Concat_Frame_20_contents', 'Concat Frame contents', 'extra', 'paragraph']
['Concat_Index', None, 'index', 'paragraph']
>>> for n in c.findall ('.//*') :
... zidx = n.get (OOo_Tag ('draw', 'z-index', m))
... if zidx :
... print (':'.join(split_tag (n.tag)), zidx)
draw:frame 0
draw:rect 1
draw:frame 3
draw:rect 4
draw:frame 6
draw:frame 7
draw:frame 8
draw:frame 9
draw:frame 10
draw:frame 11
draw:frame 12
draw:frame 13
draw:frame 14
draw:frame 15
draw:frame 16
draw:frame 18
draw:frame 19
draw:frame 20
draw:frame 17
draw:frame 23
draw:line 24
draw:frame 2
draw:frame 5
draw:line 22
draw:line 21
>>> import os, sys
>>> oldpath = os.environ ['PYTHONPATH']
>>> os.environ ['PYTHONPATH'] = '.'
>>> os.system ('python%s bin/ooo_fieldreplace -i testfiles/test.odt '
... '-o testout.odt '
... 'salutation=Frau firstname=Erika lastname=Musterfrau '
... 'country=D postalcode=00815 city=Niemandsdorf '
... 'street="Beispielstrasse 42"' % sys.version_info [0])
0
>>> os.environ ['PYTHONPATH'] = oldpath
>>> o = OOoPy (infile = 'testout.odt')
>>> c = o.read ('content.xml')
>>> m = o.mimetype
>>> body = c.find (OOo_Tag ('office', 'body', mimetype = m))
>>> vset = './/' + OOo_Tag ('text', 'variable-set', mimetype = m)
>>> for node in body.findall (vset) :
... name = node.get (OOo_Tag ('text', 'name', m))
... print (name, ':', node.text)
salutation : Frau
firstname : Erika
lastname : Musterfrau
street : Beispielstrasse 42
country : D
postalcode : 00815
city : Niemandsdorf
salutation : Frau
firstname : Erika
lastname : Musterfrau
street : Beispielstrasse 42
country : D
postalcode : 00815
city : Niemandsdorf
>>> o.close ()
>>> os.environ ['PYTHONPATH'] = '.'
>>> os.system ("python%s bin/ooo_mailmerge "
... "-o testout.odt -d'|' testfiles/carta.odt "
... "testfiles/x.csv" % sys.version_info [0])
0
>>> os.environ ['PYTHONPATH'] = oldpath
>>> o = OOoPy (infile = 'testout.odt')
>>> m = o.mimetype
>>> c = o.read ('content.xml')
>>> body = c.find (OOo_Tag ('office', 'body', mimetype = m))
>>> vset = './/' + OOo_Tag ('text', 'variable-set', mimetype = m)
>>> for node in body.findall (vset) :
... name = node.get (OOo_Tag ('text', 'name', m))
... print (name, ':', node.text)
Spett : Spettabile
contraente : First person
indirizzo : street? 1
Spett : Egregio
contraente : Second Person
indirizzo : street? 2
tipo : racc. A.C.
luogo : Varese
oggetto : Saluti
tipo : Raccomandata
luogo : Gavirate
oggetto : Ossequi
>>> o.close ()
>>> infile = 'testfiles/testenum.odt'
>>> o = OOoPy (infile = infile, outfile = 'xyzzy.odt')
>>> t = Transformer (
... o.mimetype
... , get_meta (o.mimetype)
... , Transforms.Addpagebreak_Style ()
... , Transforms.Mailmerge
... ( iterator =
... ( dict (firstname = 'Erika', lastname = 'Nobody')
... , dict (firstname = 'Eric', lastname = 'Wizard')
... , cb
... )
... )
... , renumber_all (o.mimetype)
... , set_meta (o.mimetype)
... , Transforms.Fix_OOo_Tag ()
... )
>>> t.transform (o)
>>> o.close ()
>>> o = OOoPy (infile = 'xyzzy.odt')
>>> m = o.mimetype
>>> c = o.read ('content.xml')
>>> body = c.find (OOo_Tag ('office', 'body', mimetype = m))
>>> textlist = './/' + OOo_Tag ('text', 'list', m)
>>> for node in body.findall (textlist) :
... id = node.get (OOo_Tag ('xml', 'id', m))
... print ('xml:id', ':', id)
xml:id : list1
xml:id : list2
xml:id : list3
>>> o = OOoPy (infile = 'testfiles/page1.odt', outfile = 'xyzzy.odt')
>>> m = o.mimetype
>>> t = Transformer (
... o.mimetype
... , get_meta (o.mimetype)
... , Transforms.Concatenate ('testfiles/page2.odt')
... , renumber_all (o.mimetype)
... , set_meta (o.mimetype)
... , Transforms.Fix_OOo_Tag ()
... , Transforms.Manifest_Append ()
... )
>>> t.transform (o)
>>> o.close ()
>>> o = OOoPy (infile = 'xyzzy.odt')
>>> c = o.read ('META-INF/manifest.xml')
>>> for node in c.getroot () :
... fe = node.get (OOo_Tag ('manifest', 'full-path', m))
... print (fe)
/
Pictures/10000000000000C80000007941B1A419.jpg
Pictures/10000000000000DC000000B02E191635.jpg
Pictures/10000000000000DC000000A337377AAA.jpg
meta.xml
settings.xml
content.xml
Thumbnails/thumbnail.png
layout-cache
manifest.rdf
Configurations2/accelerator/current.xml
Configurations2/
styles.xml
>>> for f in sorted (o.izip.infolist (), key = lambda x: x.filename) :
... print (f.filename)
Configurations2/accelerator/current.xml
Configurations2/images/Bitmaps/
META-INF/manifest.xml
Pictures/10000000000000C80000007941B1A419.jpg
Pictures/10000000000000DC000000A337377AAA.jpg
Pictures/10000000000000DC000000B02E191635.jpg
Thumbnails/thumbnail.png
content.xml
layout-cache
manifest.rdf
meta.xml
mimetype
settings.xml
styles.xml
>>> sio = BytesIO ()
>>> o = OOoPy (infile = 'testfiles/tbl_first.odt', outfile = sio)
>>> m = o.mimetype
>>> t = Transformer (
... o.mimetype
... , get_meta (o.mimetype)
... , Transforms.Concatenate ('testfiles/tbl_second.odt')
... , renumber_all (o.mimetype)
... , set_meta (o.mimetype)
... , Transforms.Fix_OOo_Tag ()
... , Transforms.Manifest_Append ()
... )
>>> t.transform (o)
>>> o.close ()
>>> o = OOoPy (infile = sio)
>>> c = o.read ('content.xml')
>>> body = c.find (OOo_Tag ('office', 'body', mimetype = m))
>>> tbls = './/' + OOo_Tag ('table', 'table', mimetype = m)
>>> for table in body.findall (tbls) :
... name = table.get (OOo_Tag ('table', 'style-name', mimetype = m))
... if name :
... print (name)
... for t in table.findall ('.//') :
... name = t.get (OOo_Tag ('table', 'style-name', mimetype = m))
... if name :
... print (name)
Tabella1
Tabella1.A
Tabella1.A1
Tabella1.B1
Tabella1.A2
Tabella1.B2
Tabella1
Tabella1.A
Tabella1.A1
Tabella1.B1
Tabella1.A2
Tabella1.B2
"""
def __init__ (self, mimetype, *tf) :
assert (mimetype in mimetypes)
self.mimetype = mimetype
self.transforms = {}
for t in tf :
self.insert (t)
self.dictionary = {}
# 2-tuples of filename, content
self.appendfiles = []
# end def __init__
def insert (self, transform) :
"""Insert a new transform"""
t = transform
if t.prio not in self.transforms :
self.transforms [t.prio] = []
self.transforms [t.prio].append (t)
t.register (self)
# end def insert
def transform (self, ooopy) :
"""
Apply all the transforms in priority order.
Priority order is global over all transforms.
"""
self.trees = {}
for f in files :
self.trees [f] = ooopy.read (f)
#self.dictionary = {} # clear dict when transforming another ooopy
for p in sorted (self.transforms.keys ()) :
for t in self.transforms [p] :
t.apply_all (self.trees)
for t in self.trees :
e = self.trees [t]
e.write ()
for fname, fcontent in self.appendfiles :
e.ooopy.append_file (fname, fcontent)
# end def transform
def __contains__ (self, key) :
return key in self.dictionary
# end def __contains__
def __getitem__ (self, key) :
return self.dictionary [key]
# end def __getitem__
def __setitem__ (self, key, value) :
self.dictionary [key] = value
# end def __setitem__
# end class Transformer | PypiClean |
/CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/db/models/query_utils.py | from __future__ import unicode_literals
from django.db.backends import util
from django.utils import six
from django.utils import tree
class InvalidQuery(Exception):
"""
The query passed to raw isn't a safe query to use with raw.
"""
pass
class QueryWrapper(object):
"""
A type that indicates the contents are an SQL fragment and the associated
parameters. Can be used to pass opaque data to a where-clause, for example.
"""
def __init__(self, sql, params):
self.data = sql, params
def as_sql(self, qn=None, connection=None):
return self.data
class Q(tree.Node):
"""
Encapsulates filters as objects that can then be combined logically (using
& and |).
"""
# Connection types
AND = 'AND'
OR = 'OR'
default = AND
def __init__(self, *args, **kwargs):
super(Q, self).__init__(children=list(args) + list(six.iteritems(kwargs)))
def _combine(self, other, conn):
if not isinstance(other, Q):
raise TypeError(other)
obj = type(self)()
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __invert__(self):
obj = type(self)()
obj.add(self, self.AND)
obj.negate()
return obj
class DeferredAttribute(object):
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field_name, model):
self.field_name = field_name
def __get__(self, instance, owner):
"""
Retrieves and caches the value from the datastore on the first lookup.
Returns the cached value.
"""
from django.db.models.fields import FieldDoesNotExist
non_deferred_model = instance._meta.proxy_for_model
opts = non_deferred_model._meta
assert instance is not None
data = instance.__dict__
if data.get(self.field_name, self) is self:
# self.field_name is the attname of the field, but only() takes the
# actual name, so we need to translate it here.
try:
f = opts.get_field_by_name(self.field_name)[0]
except FieldDoesNotExist:
f = [f for f in opts.fields
if f.attname == self.field_name][0]
name = f.name
# Let's see if the field is part of the parent chain. If so we
# might be able to reuse the already loaded value. Refs #18343.
val = self._check_parent_chain(instance, name)
if val is None:
# We use only() instead of values() here because we want the
# various data coercion methods (to_python(), etc.) to be
# called here.
val = getattr(
non_deferred_model._base_manager.only(name).using(
instance._state.db).get(pk=instance.pk),
self.field_name
)
data[self.field_name] = val
return data[self.field_name]
def __set__(self, instance, value):
"""
Deferred loading attributes can be set normally (which means there will
never be a database lookup involved).
"""
instance.__dict__[self.field_name] = value
def _check_parent_chain(self, instance, name):
"""
Check if the field value can be fetched from a parent field already
loaded in the instance. This can be done if the to-be fetched
field is a primary key field.
"""
opts = instance._meta
f = opts.get_field_by_name(name)[0]
link_field = opts.get_ancestor_link(f.model)
if f.primary_key and f != link_field:
return getattr(instance, link_field.attname)
return None
def select_related_descend(field, restricted, requested, load_fields, reverse=False):
"""
Returns True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(sql.query.fill_related_selections()) and the model instance creation code
(query.get_klass_info()).
Arguments:
* field - the field to be checked
* restricted - a boolean field, indicating if the field list has been
manually restricted using a requested clause
* requested - The select_related() dictionary.
* load_fields - the set of fields to be loaded on this model
* reverse - boolean, True if we are checking a reverse select related
"""
if not field.rel:
return False
if field.rel.parent_link and not reverse:
return False
if restricted:
if reverse and field.related_query_name() not in requested:
return False
if not reverse and field.name not in requested:
return False
if not restricted and field.null:
return False
if load_fields:
if field.name not in load_fields:
if restricted and field.name in requested:
raise InvalidQuery("Field %s.%s cannot be both deferred"
" and traversed using select_related"
" at the same time." %
(field.model._meta.object_name, field.name))
return False
return True
# This function is needed because data descriptors must be defined on a class
# object, not an instance, to have any effect.
def deferred_class_factory(model, attrs):
"""
Returns a class object that is a copy of "model" with the specified "attrs"
being replaced with DeferredAttribute objects. The "pk_value" ties the
deferred attributes to a particular instance of the model.
"""
class Meta:
proxy = True
app_label = model._meta.app_label
# The app_cache wants a unique name for each model, otherwise the new class
# won't be created (we get an old one back). Therefore, we generate the
# name using the passed in attrs. It's OK to reuse an existing class
# object if the attrs are identical.
name = "%s_Deferred_%s" % (model.__name__, '_'.join(sorted(list(attrs))))
name = util.truncate_name(name, 80, 32)
overrides = dict([(attr, DeferredAttribute(attr, model))
for attr in attrs])
overrides["Meta"] = Meta
overrides["__module__"] = model.__module__
overrides["_deferred"] = True
return type(str(name), (model,), overrides)
# The above function is also used to unpickle model instances with deferred
# fields.
deferred_class_factory.__safe_for_unpickling__ = True | PypiClean |
/DLStudio-2.3.0.tar.gz/DLStudio-2.3.0/Examples/object_detection_and_localization.py |
## object_detection_and_localization.py
"""
This script shows how you can use the functionality provided by the inner class
DetectAndLocalize of the DLStudio module for experimenting with object detection and
localization.
Detecting and localizing objects in images is a more difficult problem than just
classifying the objects. The former requires that your CNN make two different types
of inferences simultaneously, one for classification and the other for localization.
For the localization part, the CNN must carry out what is known as regression. What
that means is that the CNN must output the numerical values for the bounding box that
encloses the object that was detected. Generating these two types of inferences
requires two different loss functions, one for classification and the other for
regression.
Training a CNN to solve the detection and localization problem requires a dataset
that, in addition to the class labels for the objects, also provides bounding-box
annotations for the objects in the images. As you see in the code below, this
script uses the PurdueShapes5 dataset for that purpose.
"""
import random
import numpy
import torch
import os, sys
"""
seed = 0
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
numpy.random.seed(seed)
torch.backends.cudnn.deterministic=True
torch.backends.cudnn.benchmarks=False
os.environ['PYTHONHASHSEED'] = str(seed)
"""
## watch -d -n 0.5 nvidia-smi
from DLStudio import *
dls = DLStudio(
# dataroot = "/home/kak/ImageDatasets/PurdueShapes5/",
dataroot = "./data/PurdueShapes5/",
image_size = [32,32],
path_saved_model = "./saved_model",
momentum = 0.9,
learning_rate = 1e-4,
epochs = 2,
batch_size = 4,
classes = ('rectangle','triangle','disk','oval','star'),
# use_gpu = True,
)
detector = DLStudio.DetectAndLocalize( dl_studio = dls )
dataserver_train = DLStudio.DetectAndLocalize.PurdueShapes5Dataset(
train_or_test = 'train',
dl_studio = dls,
dataset_file = "PurdueShapes5-10000-train.gz",
)
dataserver_test = DLStudio.DetectAndLocalize.PurdueShapes5Dataset(
train_or_test = 'test',
dl_studio = dls,
dataset_file = "PurdueShapes5-1000-test.gz"
)
detector.dataserver_train = dataserver_train
detector.dataserver_test = dataserver_test
detector.load_PurdueShapes5_dataset(dataserver_train, dataserver_test)
model = detector.LOADnet2(skip_connections=True, depth=8)
number_of_learnable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("\n\nThe number of learnable parameters in the model: %d" % number_of_learnable_params)
num_layers = len(list(model.parameters()))
print("\nThe number of layers in the model: %d\n\n" % num_layers)
detector.run_code_for_training_with_CrossEntropy_and_MSE_Losses(model)
import pymsgbox
response = pymsgbox.confirm("Finished training. Start testing on unseen data?")
if response == "OK":
detector.run_code_for_testing_detection_and_localization(model) | PypiClean |
/Flask-MDBootstrap-3.0.5.tar.gz/Flask-MDBootstrap-3.0.5/flask_mdbootstrap/static/MDB-Pro/src/js/vendor/pro/jquery.sticky.js | ;(function($) {
$.fn.sticky = function(options) {
var defaults = {
topSpacing: 0, // No spacing by default
zIndex: '', // No default z-index
stopper: '.sticky-stopper', // Default stopper class, also accepts number value
stickyClass: false // Class applied to element when it's stuck
};
var settings = $.extend({}, defaults, options); // Accepts custom stopper id or class
// Checks if custom z-index was defined
function checkIndex() {
if (typeof settings.zIndex == 'number') {
return true;
} else {
return false;
}
}
var hasIndex = checkIndex(); // True or false
// Checks if a stopper exists in the DOM or number defined
function checkStopper() {
if (0 < $(settings.stopper).length || typeof settings.stopper === 'number') {
return true;
} else {
return false;
}
}
var hasStopper = checkStopper(); // True or false
return this.each(function() {
var $this = $(this);
var thisHeight = $this.outerHeight();
var thisWidth = $this.outerWidth();
var topSpacing = settings.topSpacing;
var zIndex = settings.zIndex;
var pushPoint = $this.offset().top - topSpacing; // Point at which the sticky element starts pushing
var placeholder = $('<div></div>').width(thisWidth).height(thisHeight).addClass('sticky-placeholder'); // Placeholder element that reserves the sticky element's space
var stopper = settings.stopper;
var $window = $(window);
function stickyScroll() {
var windowTop = $window.scrollTop(); // Check window's scroll position
var stopPoint = stopper;
var parentWidth = $this.parent().width();
placeholder.width(parentWidth)
if ( hasStopper && typeof stopper === 'string' ) {
var stopperTop = $(stopper).offset().top;
stopPoint = (stopperTop - thisHeight) - topSpacing;
}
if (pushPoint < windowTop) {
// Create a placeholder for sticky element to occupy vertical real estate
if(settings.stickyClass)
$this.addClass(settings.stickyClass);
$this.after(placeholder).css({
position: 'fixed',
top: topSpacing,
width: parentWidth
});
if (hasIndex) {
$this.css({
zIndex: zIndex
});
}
if (hasStopper) {
if (stopPoint < windowTop) {
var diff = (stopPoint - windowTop) + topSpacing;
$this.css({
top: diff
});
}
}
} else {
if(settings.stickyClass)
$this.removeClass(settings.stickyClass);
$this.css({
position: 'static',
top: null,
left: null,
width: 'auto'
});
placeholder.remove();
}
}
if($window.innerHeight() > thisHeight) {
$window.bind('scroll', stickyScroll);
$window.bind('load', stickyScroll);
$window.bind('resize', stickyScroll);
}
});
};
})(jQuery); | PypiClean |
/GraphCASE-0.0.9.tar.gz/GraphCASE-0.0.9/GAE/graph_case_controller.py | import pickle
from datetime import datetime
import numpy as np
import tensorflow as tf
from GAE.model import GraphAutoEncoderModel
from GAE.input_layer_constructor import InputLayerConstructor
from GAE.graph_reconstructor import GraphReconstructor
from GAE.transformation_layer import DecTransLayer, EncTransLayer, Hub0_encoder, Hub0Decoder
from GAE.data_feeder_nx import DataFeederNx
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
class GraphAutoEncoder:
"""
This class implement the graphCase algorithm. Refer for more details
to the corresponding documentation.
Args:
graph: graph on which the embedding is trained. Only bi-directed
graphs re supported.
learning_rate: learning rate of the MLP.
support_size: list with number of sampled edges per layer. The
current implementation only support one size for all layers
dims: list with the dimension per layer.
batch_size: number of nodes per training cycle.
max_total_steps: Number of batches used for training the mlp.
validate_iter: Number of batches between a validation batch.
verbose: boolean if True then detailed feedback on the training progress
is given.
seed: Seed used for the random split in train and test set.
"""
def __init__(self,
graph=None,
learning_rate=0.0001,
support_size=[2, 2],
dims=[32, 32, 32, 32],
hub0_feature_with_neighb_dim=None,
batch_size=3,
verbose=False,
seed=1,
weight_label='weight',
encoder_labels=None,
act=tf.nn.sigmoid,
useBN=False,
val_fraction=0.3,
model_config=None,
dropout=False,
data_feeder_cls=DataFeederNx,
pos_enc_cls=None
):
self.learning_rate = learning_rate
self.dims = dims
self.hub0_feature_with_neighb_dim = hub0_feature_with_neighb_dim
self.batch_size = batch_size
self.support_size = support_size
self.verbose = verbose
self.seed = seed
self.act = act
self.weight_label = weight_label
self.encoder_labels = encoder_labels
self.useBN = useBN
self.dropout = dropout
self.val_fraction = val_fraction
self.data_feeder_cls = data_feeder_cls
self.pos_enc_cls = pos_enc_cls
self.mpu, self.cpu = self.determine_mpu()
if graph is not None:
self.__consistency_checks()
self.sampler = self.__init_sampler(graph, val_fraction, pos_enc_cls)
self.model = self.__init_model()
if model_config is not None:
custom_objects = {
"DecTransLayer": DecTransLayer,
"EncTransLayer": EncTransLayer,
"Hub0_encoder": Hub0_encoder,
"Hub0_decoder": Hub0Decoder
}
with tf.keras.utils.custom_object_scope(custom_objects):
self.model = GraphAutoEncoderModel.from_config(model_config)
def __init_sampler(self, graph, val_fraction, pos_enc_cls):
"""
Initialises the datafeeder
"""
with tf.device(self.cpu):
return InputLayerConstructor(
graph, support_size=self.support_size, val_fraction=val_fraction,
batch_size=self.batch_size, verbose=self.verbose, seed=self.seed,
weight_label=self.weight_label, encoder_labels=self.encoder_labels,
data_feeder_cls=self.data_feeder_cls, pos_enc_cls=pos_enc_cls
)
def __init_model(self):
"""
Initialises the model
"""
with tf.device(self.cpu):
model = GraphAutoEncoderModel(
self.dims, self.support_size, self.sampler.get_feature_size(),
hub0_feature_with_neighb_dim=self.hub0_feature_with_neighb_dim,
number_of_node_labels=self.sampler.get_number_of_node_labels(),
verbose=self.verbose, seed=self.seed, dropout=self.dropout, act=self.act,
useBN=self.useBN)
optimizer = tf.optimizers.Adam(learning_rate=self.learning_rate)
optimizer = tf.optimizers.RMSprop(learning_rate=self.learning_rate)
model.compile(optimizer=optimizer, loss='mse')
self.sampler.init_train_batch()
train_data = self.sampler.get_train_samples()
for n in train_data.take(1):
model(n[0])
return model
def calculate_embeddings(self, graph=None, nodes=None, verbose=False):
"""
Calculated the embedding of the nodes specified. If no nodes are
specified, then the embedding for all nodes are calculated.
Args:
graph: Optionally the graph for which the embeddings need to be calculated. If set to
None then the graph used for initializing is used.
nodes: Optionally a list of node ids in the graph for which the
embedding needs to be calculated.
Returns:
A 2d numpy array with one embedding per row.
"""
self.verbose = verbose
if verbose:
print("calculating all embeddings")
with tf.device(self.cpu):
if graph is not None:
self.sampler = self.__init_sampler(graph, self.val_fraction, self.pos_enc_cls)
embedding = None
counter = 0
for i in self.sampler.init_incr_batch(nodes):
counter += 1
try:
with tf.device(self.mpu):
embed = self.model.calculate_embedding(i)
if embedding is None:
embedding = embed
else:
embedding = np.vstack([embedding, embed])
if counter % 100 == 0:
print("processed ", counter, " batches time: ", datetime.now())
except tf.errors.OutOfRangeError:
break
if verbose:
print("reached end of batch")
return embedding
def save_model(self, save_path):
"""
Saves the model. Note that a reloaded model can only be called.
Args:
save_path: path in which the model is stored.
"""
self.model.save(save_path)
attr_dict = {
"learning_rate": self.learning_rate,
"dims": self.dims,
"hub0_feature_with_neighb_dim": self.hub0_feature_with_neighb_dim,
"batch_size": self.batch_size,
"support_size": self.support_size,
"verbose": self.verbose,
"seed": self.seed,
"act": self.act,
"weight_label": self.weight_label,
"useBN": self.useBN,
"val_fraction": self.val_fraction,
# "model_config": self.model.get_config()
}
pickle.dump(attr_dict, open(f"{save_path}/params.pickle", "wb"))
@classmethod
def load_model(cls, save_path):
"""
Loads a trained model from a pickle file.
Note that the restored model can only be called.
Args:
save_path: path where the model was stored.
"""
params = pickle.load(open(f"{save_path}/params.pickle", "rb"))
new_gae = cls(graph=None, **params)
new_gae.model = tf.keras.models.load_model(save_path)
return new_gae
def save_weights(self, save_path):
"""
Saves the weights of the model. These weights can be used to reconstruct the model for
those cases where the model will be updated or changed
Args:
save_path: The path where the weights are saved to.
"""
self.model.save_weights(save_path)
def load_weights(self, save_path):
"""
Loads earlier saved weights back into the model. Note that we assume that the model has
the same configuration as the model of the saved weights
Args:
save_path: The folder containing the saved weights.
"""
self.model.load_weights(save_path)
def __consistency_checks(self):
"""
Performs the following consistency checks.
1) len of dims list is 2 * len support size or len is 2 * support size + 1
"""
assert len(self.dims) == 2 * len(self.support_size) or \
len(self.dims) -1 == 2 * len(self.support_size), \
f"number of dims {len(self.dims)} does not match with two times the number of " \
f"support sizes {len(self.support_size)}"
def fit(self, graph=None, verbose=None, layer_wise=False, epochs=4):
"""
Trains the model.
Args:
graph: The graph used for training. If None then the graph for initializing the model
is used.
verbose: Boolean to indicate whether information during training needs to be shown.
layer_wise: Boolean to indicate whether the model needs to trained layer by layer or
all at once.
epochs: Number of epochs used for training.
Returns:
Dict with the training results.
"""
with tf.device(self.cpu):
hist = {}
if verbose is not None:
self.verbose = verbose
model_verbose = 1 if self.verbose else 0
if graph is not None:
self.sampler = self.__init_sampler(graph, self.val_fraction, self.pos_enc_cls)
layers = [None]
if layer_wise:
layers = [i for i in range(len(self.dims))] + layers
for _, l in enumerate(layers):
with tf.device(self.cpu):
self.model.sub_model_layer = l
self.sampler.init_train_batch()
train_data = self.sampler.get_train_samples()
validation_data = self.sampler.get_val_samples()
train_epoch_size, val_epoch_size = self.sampler.get_epoch_sizes()
steps_per_epoch = int(train_epoch_size / self.batch_size)
assert steps_per_epoch>0, "batch_size greater then 1 train epoch"
validation_steps = int(val_epoch_size / self.batch_size)
assert validation_steps>0, "batch_size greater then 1 validation epoch"
# early_stop = tf.keras.callbacks.EarlyStopping(
# monitor='val_loss', min_delta=0, patience=3, verbose=0
# )
with tf.device(self.mpu):
history = self.model.fit(
train_data,
validation_data=validation_data,
epochs=epochs,
verbose=model_verbose,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
# callbacks=[early_stop]
)
hist[l] = history
return hist
def fit_supervised(
self, label_name, model, compiler_dict, train_dict, graph=None, verbose=None):
"""
Extends the GAE with a supervised model and trains it on the given graph or,
if none is provided, on the current graph.
Args:
label_name: The name of the node label containing the label information.
model: The supervised part of the model. The output of the encoder is fed into the
supervised part.
compiler_dict: Dict with the parameter to be used for compiling the model.
train_dict: Dict with the training parameter to be used for training the model.
graph: The graph on which the model will be trained.
"""
if verbose is not None:
self.verbose = verbose
model_verbose = 1 if self.verbose else 0
if graph is not None:
self.sampler = self.__init_sampler(graph, self.val_fraction, self.pos_enc_cls)
self.model.create_supervised_model(model)
self.model.compile(**compiler_dict)
self.sampler.init_train_batch(label_name)
train_data = self.sampler.get_supervised_train_samples()
validation_data = self.sampler.get_supervised_val_samples()
train_epoch_size, val_epoch_size = self.sampler.get_epoch_sizes()
steps_per_epoch = int(train_epoch_size / self.batch_size)
validation_steps = int(val_epoch_size / self.batch_size)
early_stop = tf.keras.callbacks.EarlyStopping(
monitor='val_loss', min_delta=0, patience=3, verbose=0
)
history = self.model.fit(
train_data,
validation_data=validation_data,
verbose=model_verbose,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
callbacks=[early_stop],
**train_dict
)
return history
def get_l1_structure(self, node_id, graph=None, verbose=None, show_graph=False,
node_label=None, get_pyvis=False, deduplicate=True,
delta=0.0001, dummy=0, fraction_sim=1.0):
"""
Retrieve the input layer and corresponding sampled graph of the local neighbourhood.
Args:
node_id: id of the node for which the input layer is calculated
graph: graph used for sampling. If no graph is specified then the current graph
is used.
show_graph Boolean indicating if a plot of the graph needs to be generated.
node_label Label used for plotting the nodes. If None then the node id is used.
returns:
a networkx graph of the sampled neighbourhood and a numpy matrix of the input layer.
"""
if verbose is not None:
self.verbose = verbose
if graph is not None:
self.sampler = self.__init_sampler(graph, self.val_fraction, self.pos_enc_cls)
inputlayer, _ = self.sampler.get_input_layer([node_id], hub=1)
target = self.sampler.get_features(node_id)
graph_rec = GraphReconstructor(deduplicate=deduplicate, delta=delta, dummy=dummy, fraction_sim=fraction_sim)
recon_graph = graph_rec.reconstruct_graph(target, inputlayer, self.support_size, pos_encoding_size=self.sampler.pos_enc_length)
if show_graph:
graph_rec.show_graph(recon_graph, node_label=node_label)
if get_pyvis:
nt = graph_rec.show_pyvis(recon_graph, node_label=node_label)
return inputlayer, recon_graph, nt
return inputlayer, recon_graph
def decode(self, embedding, incl_graph=None, delta=0.0001, dummy=0, deduplicate=True, fraction_sim=1.0):
"""
Decodes the given embedding into a node and local neighbourhood.
Args:
embedding: Embedding of the node
incl_graph :{None | pyvis | graph }
delta: Min difference between reconstructed feature value and dummy value. Nodes
with a smaller difference are considered dummy nodes and are removed.
dummy: Value of the dummy node.
Returns:
A tuple with the node labels, inputlayer and optionally a graph.
"""
feat_out, df_out = self.model.decode(embedding)
# create dummy feat_out in case hub0 features are not included.
if feat_out is None:
feat_out = tf.constant([1,1,len(self.encoder_labels)])
if incl_graph is not None:
graph_rec = GraphReconstructor(delta=delta, dummy=dummy, deduplicate=deduplicate, fraction_sim=fraction_sim)
recon_graph = graph_rec.reconstruct_graph(feat_out, df_out, self.support_size, pos_encoding_size=self.sampler.pos_enc_length)
if incl_graph == 'graph':
return feat_out, df_out, recon_graph
if incl_graph == 'pyvis':
nt = graph_rec.show_pyvis(recon_graph)
return feat_out, df_out, nt
return feat_out, df_out, None
def determine_mpu(self):
""" determine the gpu and cpu name"""
devices = tf.config.list_logical_devices()
GPUs = [d for d in devices if d.device_type=='GPU']
CPUs = [d for d in devices if d.device_type=='CPU']
if len(GPUs)>0:
return (GPUs[0].name, CPUs[0].name)
else:
return (CPUs[0].name, CPUs[0].name) | PypiClean |
/MOM-Tapyr-1.6.2.tar.gz/MOM-Tapyr-1.6.2/_Attr/Coll.py |
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
from _MOM import MOM
from _TFL import TFL
import _TFL._Meta.M_Class
import _TFL.Decorator
class M_Coll (TFL.Meta.M_Class) :
_state_changers = ()
Table = {}
def __init__ (cls, name, bases, dct) :
cls.__m_super.__init__ (name, bases, dct)
if cls.P_Type is not None :
if cls.P_Type not in cls.Table :
cls.Table [cls.P_Type] = cls
for mn in cls._state_changers :
m = getattr (cls, mn, None)
if m is not None :
setattr (cls, mn, cls._override (m))
else :
### sub-class overriding `attr_name`
assert cls.Table [cls.P_Type] is bases [0]
# end def __init__
@staticmethod
@TFL.Decorator
def _override (method) :
def _ (self, * args, ** kw) :
old = self.copy ()
result = method (self, * args, ** kw)
if old != self :
self.record_attr_change (old)
return result
return _
# end def _override
# end class M_Coll
class _Mixin_ (TFL.Meta.BaM (object, metaclass = M_Coll)) :
P_Type = None
attr_name = None
owner = None
_attr_man = property (lambda s : s.owner and s.owner._attr_man)
def copy (self) :
return self.__class__ (self)
# end def copy
@property
def owner_attr (self) :
"""Return the attribute (kind property) of the `owner` object that
holds `self`.
"""
if self.owner and self.attr_name :
return self.owner.attr_prop (self.attr_name)
# end def owner_attr
def record_attr_change (self, old) :
### communicate change of `self` to `self.owner`
if self.owner is not None :
self.owner.record_attr_change \
({self.attr_name : self.owner_attr.as_string (old)})
# end def record_attr_change
# end class _Mixin_
class List (_Mixin_, list) :
"""List of attribute values"""
P_Type = list
_state_changers = \
( "__delitem__", "__delslice__"
, "__iadd__", "__imul__"
, "__setitem__", "__setslice__"
, "append", "extend", "insert", "pop", "remove", "reverse", "sort"
)
# end class List
class Set (_Mixin_, set) :
"""Set of attribute values"""
P_Type = set
_state_changers = \
( "__iand__", "__ior__", "__isub__", "__ixor__"
, "add", "clear", "difference_update", "discard"
, "intersection_update", "pop", "remove"
, "symmetric_difference_update", "update"
)
# end class Set
if __name__ != "__main__" :
MOM.Attr._Export ("*")
### __END__ MOM.Attr.Coll | PypiClean |
/DiPAS-2.0.tar.gz/DiPAS-2.0/docs/source/usage/converting.ipynb | # Converting thick to thin elements
Not all lattice elements support thick tracking and so converting these elements to thin slices is necessary before doing particle tracking or optics calculations. Elements can be converted to their thin representation using the `makethin` method:
```
from dipas.build import Lattice
with Lattice({'particle': 'proton', 'beta': 0.6}) as lattice:
lattice.HKicker(kick=0.5, l=1.0, label='hk1')
lattice.Quadrupole(k1=0.625, l=5.0, label='q1')
kicker, quad = lattice
print(kicker)
print(kicker.makethin(5))
```
The `makethin` method returns an `elements.ThinElement` object, a special version of a more general `Segment`. This `ThinElement` contains the thin kicker slices as well as the drift space before, between and after the slices. The distribution of drift space depends on the selected slicing style. By default the `TEAPOT` style is used. Other available slicing styles include `SIMPLE` and `EDGE`. For more details, consider the documentation of the `elements.ThinElement.create_thin_sequence` method.
Let's compare the `SIMPLE` and `EDGE` style for the quadrupole element:
```
print('EDGE', end='\n\n')
print(quad.makethin(5, style='edge'), end='\n\n')
print('SIMPLE', end='\n\n')
print(quad.makethin(5, style='simple'), end='\n\n')
```
`EDGE` places the outermost slices directly at the edges of the thick element, while `SIMPLE` adds a margin that is half the in-between distance of slices.
We can also convert whole lattices represented by `Segment` objects to thin elements. Here we can choose the number of slices as well as the style via a dict which maps element identifiers to the particular values. The identifiers can be strings for comparing element labels, regex patterns for matching element labels or lattice element types, similar to element selection via `lattice[identifier]` (see [inspecting lattices](./inspecting.html)).
```
from dipas.elements import HKicker, Quadrupole, Segment
lattice = Segment(lattice)
thin = lattice.makethin({HKicker: 2, 'q1': 5}, style={'hk1': 'edge', Quadrupole: 'simple'})
print(thin)
```
The `ThinElement`s represent their thick counterparts which are still accessible via the `.base` attribute. Also the base label is inherited (element access works as explained in [inspecting lattices](./inspecting.html)):
```
print('q1.base: ', thin['q1'].base)
print('q1.label: ', thin[1].label)
for drift in thin['q1']['q1__d*']:
print(drift)
```
We can also flatten such a nested `Segment`, containing `ThinElement`s, using the `flat` (or `flatten`) method:
```
print(thin.flat())
```
`flatten()` returns a generator over all the nested elements.
| PypiClean |
/ConGen-0.0.5.tar.gz/ConGen-0.0.5/README.md | # ConGen: Creating conservation planning datasets intuitively by playing with parameters and getting instant visualized feedback
## Requirements
For python library requirements, see pyproject.toml. Additionally, the following system libraries and development headers are required:
- basic build tools (gcc, make etc.)
-
- gdal for building the Python GDAL module
## TODOs
- [ ] Fix WardClusteredPointLayer
- [ ] CLI Interface
- [ ] Benchmark different conservation planning software
- [ ] Improve UI / UX
- [ ] Onboarding / Instructions
- [ ] Project creation wizard with predefined templates?
- [ ] Project template files
- [ ] Tooltips for parameters
## Archievements
- Object-oriented design
- All layer types are implemented as classes that inherit from common layer types, overriding common methods if needed
- wxPython GUI with Model-View-Controller Design
- LayerListCtrl controller class manages both a wxPython list control and the underlying data structure
- List view and underlying data structure are always in sync
- On parameter change: Corresponding value in underlying data structure gets automatically updated, list view and rendering are automatically refreshed
- Custom implementation for parameters
- Wrapping a single value (e.g. int, bool, string) with metadata (default, min, max values, parameter name and description, etc.)
- Stored in a list for each layer type to distinguish parameters from other layer instance variables
- Each layer type requires different parameters, extends the parameter list from inherited classes
- Automatically render appropriate parameter controls for each layer type based on parameter list
- Future: automatically render columns in layer list based on parameter list
- Caching: Specify which parameters invalidate the calculated cache, only re-calculate layer data if relevant parameters change, don't recalculate for parameters that can be applied to existing data -> minimizes computational overhead, especially for applications with many layers
- Python magic so parameters of each layer can still be accessed via `layer.parameterName` (see the sketch after this list) despite being
a) stored in a list variable instead of being class instance members themselves
b) classes that wrap the actual value with additional metadata
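
A rough sketch of how such a parameter wrapper plus attribute lookup could look (class names, fields and default values here are illustrative only, not ConGen's actual API):

```python
class Parameter:
    """Wraps a single value with metadata (defaults, bounds, description)."""
    def __init__(self, name, default, min_value=None, max_value=None,
                 description="", invalidates_cache=True):
        self.name = name
        self.value = default
        self.default = default
        self.min_value = min_value
        self.max_value = max_value
        self.description = description
        self.invalidates_cache = invalidates_cache  # does changing it require re-computation?

class Layer:
    # Each layer type declares its parameters in a list, extending inherited ones.
    parameters = [Parameter("seed", 0), Parameter("octaves", 4, min_value=1, max_value=8)]

    def __getattr__(self, name):
        # Called only when normal attribute lookup fails: resolve parameter names
        # against the parameter list so `layer.octaves` returns the wrapped value.
        for parameter in self.parameters:
            if parameter.name == name:
                return parameter.value
        raise AttributeError(name)
```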
## Material
### Ecology
- [BC Marine Conservation Analysis](https://bcmca.ca/)
### Procedural map generation
- [Notes on Procedural Map Generation Techniques](https://christianjmills.com/posts/procedural-map-generation-techniques-notes/index.html)
- [java - Procedural Map Generation - Stack Overflow](https://stackoverflow.com/questions/9448386/procedural-map-generation)
- [Map Generation - Procedural Content Generation Wiki](http://pcg.wikidot.com/pcg-algorithm%3amap-generation)
- [Procedural Map Generation with Godot — Part 1](https://medium.com/pumpkinbox-blog/procedural-map-generation-with-godot-part-1-1b4e78191e90)
- [A Guide to Procedural Generation - GameDev Academy](https://gamedevacademy.org/procedural-2d-maps-unity-tutorial/)
- [Procedural Map | Rust Wiki | Fandom](https://rust.fandom.com/wiki/Procedural_Map)
- [Map generator - Minetest Wiki](https://wiki.minetest.net/Map_generator)
#### Perlin Noise
- [Noise and Turbulence - Ken's Academy Award](https://mrl.cs.nyu.edu/~perlin/doc/oscar.html) -> Ken Perlin
- [Lecture 14 Procedural Generation: Perlin Noise](https://www.cs.umd.edu/class/fall2019/cmsc425/handouts/lect14-perlin.pdf)
- [Perlin Noise - Scratchapixel](https://www.scratchapixel.com/lessons/procedural-generation-virtual-worlds/perlin-noise-part-2)
-
- [Playing with Perlin Noise: Generating Realistic Archipelagos](https://medium.com/@yvanscher/playing-with-perlin-noise-generating-realistic-archipelagos-b59f004d8401)
- [Perlin 2D Noise in python](https://engineeredjoy.com/blog/perlin-noise/)
- [Exploring Perlin Noise in Python](https://samclane.dev/Perlin-Noise-Python/)
- [Perlin noise in python - Stack Overflow](https://stackoverflow.com/questions/71040845/perlin-noise-in-python)
-
##### Python Libraries
Not exhaustive
- [EasyPerlinNoise](https://pypi.org/project/EasyPerlinNoise/)
- [pythonperlin](https://pypi.org/project/pythonperlin/)
- [perlin](https://pypi.org/project/perlin/)
- [perlin-numpy](https://github.com/pvigier/perlin-numpy)
- [perlin-cupy](https://pypi.org/project/perlin-cupy/)
- [vec-noise](https://pypi.org/project/vec-noise/)
- [noise](https://pypi.org/project/noise/)
- [noise-randomized](https://pypi.org/project/noise-randomized/)
- [perlin-noise](https://pypi.org/project/perlin-noise/)
- [nPerlinNoise](https://pypi.org/project/nPerlinNoise/)
- [pyfastnoisesimd](https://pypi.org/project/pyfastnoisesimd/)
- [pyperlin](https://pypi.org/project/pyperlin/)
- [shades](https://pypi.org/project/shades/)
- [opensimplex](https://pypi.org/project/opensimplex/)
- [noisemaker](https://pypi.org/project/noisemaker/) -> Olsen Noise(?)
- [pyramage](https://pypi.org/project/pyramage/)
- [processing](https://py.processing.org/reference/noise.html)
- [perlin.py](https://gist.github.com/eevee/26f547457522755cb1fb8739d0ea89a1) | PypiClean |
/Code_groupe1_laguilhon-0.0.1.tar.gz/Code_groupe1_laguilhon-0.0.1/README.md | General description
The objective of this project is to create photofit (facial composite) software. The idea is to present several images of faces to the user. The user chooses the one or more images that are closest to the person they wish to report. Each time, a genetic algorithm followed by a decoder forms new images from the chosen ones. The decoder comes from a Variational AutoEncoder (VAE) trained on a database of portraits (CelebA). This AI is used to generate new images derived from the previously selected images. After several iterations of image selection and generation, a portrait established by the AI can be validated by the user, thus creating the final photofit image. This image can then be exported to a PDF along with the information provided by the user.
We selected 607 images from the CelebA dataset to create a new dataset which is presented to the user. We chose images with a plain background because the autoencoder works better for this type of image.
Development objectives
This is the third version of our project. It works, but it could use several improvements. First and foremost, it could use a better version of the genetic algorithm and AutoEncoder to produce better-quality images, because ours are very blurry. We could also use another database to obtain better images; indeed, the AutoEncoder works better with a plain background. Another improvement is linked to the graphical interface: you can always make it more intuitive and user-friendly.
Requirements for the development environment to be integrated
The software can be installed on the Linux or Mac operating systems. It requires a Python 3 environment. Because of its relatively large size, it also needs some free disk space.
Instructions for installation and use
To install the software, run the bash script. Using the software is then very simple: the user can follow the tutorial via the "Tutoriel" button on the first page. The user begins by choosing one or more images. If no initial image is suitable, they can choose to re-initialise the images. After the choosing phase, the algorithm generates new images from those that have been selected. The user can then select one or more images again, and so on. When an image matches what the user desires, they can stop the process by validating that image. It can be saved in a PDF document along with other information about the user.
List of the technologies used and, if applicable, links to other information about these technologies
Genetic algorithm: The genetic algorithm modifies the selected images and mixes them if the user chooses several. The mutations and crossing-over are applied to the selected images in their vector (= encoded) form.
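
As a rough illustration only (this is not the project's actual code; the 4096-component vector size matches the dense layers in the autoencoder summary below, and the mutation rate and noise scale are made-up values), crossover and mutation on the encoded vectors could look like this:

```python
import numpy as np

LATENT_DIM = 4096  # size of the encoded form, matching the dense layers in the summary below

def crossover(parent_a, parent_b):
    # Each component of the child vector is taken from one of the two parents.
    mask = np.random.rand(LATENT_DIM) < 0.5
    return np.where(mask, parent_a, parent_b)

def mutate(latent, rate=0.05, scale=0.1):
    # Perturb a small fraction of the components with Gaussian noise.
    mask = np.random.rand(LATENT_DIM) < rate
    return latent + mask * np.random.normal(0.0, scale, LATENT_DIM)

# child_vector = mutate(crossover(encoded_image_1, encoded_image_2))
# The decoder then turns child_vector back into an image.
```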
Autoencoder: The autoencoder combines an encoder and a decoder. It was trained on 30 000 images for 50 epochs. You can find the summary of our autoencoder below:
Structure of the encoder:
Layer (type)                 Output Shape
=================================================================
conv2d (Conv2D)              (None, 64, 64, 8)
conv2d_1 (Conv2D)            (None, 32, 32, 4)
flatten (Flatten)            (None, 4096)
dense (Dense)                (None, 4096)
dense_1 (Dense)              (None, 4096)
=================================================================
Structure of the decoder:
Layer (type)                 Output Shape
=================================================================
dense_3 (Dense)              (None, 4096)
dense_4 (Dense)              (None, 4096)
reshape (Reshape)            (None, 32, 32, 4)
conv2d (Conv2DTranspose)     (None, 32, 32, 4)
conv2d (Conv2DTranspose)     (None, 64, 64, 8)
conv2d_3 (Conv2D)            (None, 128, 128, 3)
=================================================================
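A minimal Keras sketch consistent with the summary above, assuming 128×128 RGB inputs; the kernel sizes, strides, activations and the extra upsampling step before the last layer are assumptions, since the summary only lists layer types and output shapes:

```python
from tensorflow import keras
from tensorflow.keras import layers

# Encoder: (128, 128, 3) image -> 4096-dimensional latent vector
encoder = keras.Sequential([
    keras.Input(shape=(128, 128, 3)),
    layers.Conv2D(8, 3, strides=2, padding="same", activation="relu"),   # (64, 64, 8)
    layers.Conv2D(4, 3, strides=2, padding="same", activation="relu"),   # (32, 32, 4)
    layers.Flatten(),                                                     # (4096,)
    layers.Dense(4096, activation="relu"),
    layers.Dense(4096),                                                   # latent code
])

# Decoder: 4096-dimensional latent vector -> (128, 128, 3) image
decoder = keras.Sequential([
    keras.Input(shape=(4096,)),
    layers.Dense(4096, activation="relu"),
    layers.Dense(4096, activation="relu"),
    layers.Reshape((32, 32, 4)),
    layers.Conv2DTranspose(4, 3, strides=1, padding="same", activation="relu"),  # (32, 32, 4)
    layers.Conv2DTranspose(8, 3, strides=2, padding="same", activation="relu"),  # (64, 64, 8)
    layers.UpSampling2D(),                                   # (128, 128, 8), assumed upsampling step
    layers.Conv2D(3, 3, padding="same", activation="sigmoid"),                   # (128, 128, 3)
])

autoencoder = keras.Sequential([encoder, decoder])
autoencoder.compile(optimizer="adam", loss="mse")
```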
Known bugs and possible corrections:
Importing TensorFlow produces a known warning message asking the user to rebuild it in order to use it at its best. Nevertheless, we were not able to rebuild it because of conflicts between module versions, and chose to ignore this warning.
The TensorFlow installation is very long, therefore we recommend that the user have it installed beforehand and not create a virtual environment (otherwise, the installation will take about 45 minutes).
The conversion performed by the genetic algorithm is poor. We need to retrain the autoencoder and adapt the genetic algorithm to obtain better results.
Sometimes there are errors like:
AttributeError : module ‘numpy’ has no attribute ‘object’
ImportError: cannot import name ‘ImageTk’ from ‘PIL’
These errors can be solved by upgrading the package: pip install package --upgrade
There can also be these errors:
No module named ‘fpdf’
No module named ‘skimage’
No module named ‘tensorflow’
No module named ‘PIL’
No module named ‘cv2’
Respectively, they can be solved by running in the terminal:
pip install fpdf
pip install scikit-image
pip install tensorflow
pip install pillow
pip install opencv-python
FAQ
Because this is the first version presented to the client, we do not have any questions to report yet.
Copyright and licensing information
See licence.txt
| PypiClean |
/LQTAgridPy-0.4.tar.gz/LQTAgridPy-0.4/src/grid_generate.py |
import matrix_generate
import utils
class GridGenerate():
def __init__(self, coordinates, dimensions, atp, files, step):
dataFile = open(files).read().splitlines()
matrices = []
minimos = [999999.0,999999.0,999999.0]
maximos = [-999999.0,-999999.0,-999999.0]
#for fileGro, fileItp, fileTop in utils.pairwise(dataFile):
for fileGro, fileTop, fileItp in utils.triplewise(dataFile):
matrix = matrix_generate.MatrixGenerate(fileGro, fileTop, fileItp)
minimos[0] = min(minimos[0],matrix.minimos[0])
minimos[1] = min(minimos[1],matrix.minimos[1])
minimos[2] = min(minimos[2],matrix.minimos[2])
maximos[0] = max(maximos[0],matrix.maximos[0])
maximos[1] = max(maximos[1],matrix.maximos[1])
maximos[2] = max(maximos[2],matrix.maximos[2])
matrices.append(matrix)
if coordinates != ():
x0, y0, z0 = coordinates
else:
x0 = int(minimos[0])-5
y0 = int(minimos[1])-5
z0 = int(minimos[2])-5
if dimensions != ():
dim_x, dim_y, dim_z = dimensions
else:
dim_x = int(maximos[0]-minimos[0])+10
dim_y = int(maximos[1]-minimos[1])+10
dim_z = int(maximos[2]-minimos[2])+10
if not step == 1:
I = int((dim_x/step)+(1/step-1))
J = int((dim_y/step)+(1/step-1))
K = int((dim_z/step)+(1/step-1))
else:
I = dim_x + 1
J = dim_y + 1
K = dim_z + 1
n = len(atp)
coulomb = ""
lj = ""
for i in range(I):
for j in range(J):
for k in range(K):
for l in range(n):
value_x = i*step+x0
value_y = j*step+y0
value_z = k*step+z0
coulomb += "%.2f_%.2f_%.2f_%s_C: \t" % (value_x, value_y,
value_z, atp[l])
lj += "%.2f_%.2f_%.2f_%s_LJ: \t" % (value_x, value_y,
value_z, atp[l])
self.output = coulomb + lj
for matrix in matrices:
matrix.gridGenerate(I, J, K, atp, x0, y0, z0, step)
valuesCoulomb = matrix.getMatrix("C")
valuesLj = matrix.getMatrix("L")
self.output += "\n" + valuesCoulomb + valuesLj
def saveGrid(self,output):
arq = open(output, "w")
arq.write(self.output)
arq.close() | PypiClean |
/netket-3.9.2.tar.gz/netket-3.9.2/netket/experimental/dynamics/_rk_tableau.py |
from typing import Callable, Optional, Tuple
import jax
import jax.numpy as jnp
from netket.utils.struct import dataclass
from netket.utils.types import Array, PyTree
default_dtype = jnp.float64
def expand_dim(tree: PyTree, sz: int):
"""
creates a new pytree with the same structure as the input `tree`, but where every leaf
has an extra dimension at axis 0 with size `sz`.
"""
def _expand(x):
return jnp.zeros((sz,) + x.shape, dtype=x.dtype)
return jax.tree_map(_expand, tree)
@dataclass
class TableauRKExplicit:
r"""
Class representing the Butcher tableau of an explicit Runge-Kutta method [1,2],
which, given the ODE dy/dt = F(t, y), updates the solution as
.. math::
y_{t+dt} = y_t + \sum_l b_l k_l
with the intermediate slopes
.. math::
k_l = F(t + c_l dt, y_t + \sum_{m < l} a_{lm} k_m).
If :code:`self.is_adaptive`, the tableau also contains the coefficients :math:`b'_l`
which can be used to estimate the local truncation error by the formula
.. math::
y_{\mathrm{err}} = \sum_l (b_l - b'_l) k_l.
[1] https://en.wikipedia.org/w/index.php?title=Runge%E2%80%93Kutta_methods&oldid=1055669759
[2] J. Stoer and R. Bulirsch, Introduction to Numerical Analysis, Springer NY (2002).
"""
order: Tuple[int, int]
"""The order of the tableau"""
a: jax.numpy.ndarray
b: jax.numpy.ndarray
c: jax.numpy.ndarray
c_error: Optional[jax.numpy.ndarray]
"""Coefficients for error estimation."""
@property
def is_explicit(self):
return jnp.allclose(self.a, jnp.tril(self.a))  # check if lower triangular
@property
def is_adaptive(self):
return self.b.ndim == 2
@property
def is_fsal(self):
"""Returns True if the first iteration is the same as last."""
# TODO: this is not yet supported
return False
@property
def stages(self):
"""
Number of stages (equal to the number of evaluations of the ode function)
of the RK scheme.
"""
return len(self.c)
@property
def error_order(self):
"""
Returns the order of the embedded error estimate for a tableau
supporting adaptive step size. Otherwise, None is returned.
"""
if not self.is_adaptive:
return None
else:
return self.order[1]
def _compute_slopes(
self,
f: Callable,
t: float,
dt: float,
y_t: Array,
):
"""Computes the intermediate slopes k_l."""
times = t + self.c * dt
# TODO: Use FSAL
k = expand_dim(y_t, self.stages)
for l in range(self.stages):
dy_l = jax.tree_map(
lambda k: jnp.tensordot(
jnp.asarray(self.a[l], dtype=k.dtype), k, axes=1
),
k,
)
y_l = jax.tree_map(
lambda y_t, dy_l: jnp.asarray(y_t + dt * dy_l, dtype=dy_l.dtype),
y_t,
dy_l,
)
k_l = f(times[l], y_l, stage=l)
k = jax.tree_map(lambda k, k_l: k.at[l].set(k_l), k, k_l)
return k
def step(
self,
f: Callable,
t: float,
dt: float,
y_t: Array,
):
"""Perform one fixed-size RK step from `t` to `t + dt`."""
k = self._compute_slopes(f, t, dt, y_t)
b = self.b[0] if self.b.ndim == 2 else self.b
y_tp1 = jax.tree_map(
lambda y_t, k: y_t
+ jnp.asarray(dt, dtype=y_t.dtype)
* jnp.tensordot(jnp.asarray(b, dtype=k.dtype), k, axes=1),
y_t,
k,
)
return y_tp1
def step_with_error(
self,
f: Callable,
t: float,
dt: float,
y_t: Array,
):
"""
Perform one fixed-size RK step from `t` to `t + dt` and additionally return the
error vector provided by the adaptive solver.
"""
if not self.is_adaptive:
raise RuntimeError(f"{self} is not adaptive")
k = self._compute_slopes(f, t, dt, y_t)
y_tp1 = jax.tree_map(
lambda y_t, k: y_t
+ jnp.asarray(dt, dtype=y_t.dtype)
* jnp.tensordot(jnp.asarray(self.b[0], dtype=k.dtype), k, axes=1),
y_t,
k,
)
db = self.b[0] - self.b[1]
y_err = jax.tree_map(
lambda k: jnp.asarray(dt, dtype=k.dtype)
* jnp.tensordot(jnp.asarray(db, dtype=k.dtype), k, axes=1),
k,
)
return y_tp1, y_err
@dataclass
class NamedTableau:
name: str
data: TableauRKExplicit
def __repr__(self) -> str:
return self.name
# fmt: off
# flake8: noqa: E123, E126, E201, E202, E221, E226, E231, E241, E251
# Fixed Step methods
bt_feuler = TableauRKExplicit(
order = (1,),
a = jnp.zeros((1,1), dtype=default_dtype),
b = jnp.ones((1,), dtype=default_dtype),
c = jnp.zeros((1), dtype=default_dtype),
c_error = None,
)
bt_feuler = NamedTableau("Euler", bt_feuler)
bt_midpoint = TableauRKExplicit(
order = (2,),
a = jnp.array([[0, 0],
[1/2, 0]], dtype=default_dtype),
b = jnp.array( [0, 1], dtype=default_dtype),
c = jnp.array( [0, 1/2], dtype=default_dtype),
c_error = None,
)
bt_midpoint = NamedTableau("Midpoint", bt_midpoint)
bt_heun = TableauRKExplicit(
order = (2,),
a = jnp.array([[0, 0],
[1, 0]], dtype=default_dtype),
b = jnp.array( [1/2, 1/2], dtype=default_dtype),
c = jnp.array( [0, 1], dtype=default_dtype),
c_error = None,
)
bt_heun = NamedTableau("Heun", bt_heun)
bt_rk4 = TableauRKExplicit(
order = (4,),
a = jnp.array([[0, 0, 0, 0],
[1/2, 0, 0, 0],
[0, 1/2, 0, 0],
[0, 0, 1, 0]], dtype=default_dtype),
b = jnp.array( [1/6, 1/3, 1/3, 1/6], dtype=default_dtype),
c = jnp.array( [0, 1/2, 1/2, 1], dtype=default_dtype),
c_error = None,
)
bt_rk4 = NamedTableau("RK4", bt_rk4)
# Adaptive step:
# Heun Euler https://en.wikipedia.org/wiki/Runge–Kutta_methods
bt_rk12 = TableauRKExplicit(
order = (2,1),
a = jnp.array([[0, 0],
[1, 0]], dtype=default_dtype),
b = jnp.array([[1/2, 1/2],
[1, 0]], dtype=default_dtype),
c = jnp.array( [0, 1], dtype=default_dtype),
c_error = None,
)
bt_rk12 = NamedTableau("RK12", bt_rk12)
# Bogacki–Shampine coefficients
bt_rk23 = TableauRKExplicit(
order = (2,3),
a = jnp.array([[0, 0, 0, 0],
[1/2, 0, 0, 0],
[0, 3/4, 0, 0],
[2/9, 1/3, 4/9, 0]], dtype=default_dtype),
b = jnp.array([[7/24,1/4, 1/3, 1/8],
[2/9, 1/3, 4/9, 0]], dtype=default_dtype),
c = jnp.array( [0, 1/2, 3/4, 1], dtype=default_dtype),
c_error = None,
)
bt_rk23 = NamedTableau("RK23", bt_rk23)
bt_rk4_fehlberg = TableauRKExplicit(
order = (4,5),
a = jnp.array([[ 0, 0, 0, 0, 0, 0 ],
[ 1/4, 0, 0, 0, 0, 0 ],
[ 3/32, 9/32, 0, 0, 0, 0 ],
[ 1932/2197, -7200/2197, 7296/2197, 0, 0, 0 ],
[ 439/216, -8, 3680/513, -845/4104, 0, 0 ],
[ -8/27, 2, -3544/2565, 1859/4104, 11/40, 0 ]], dtype=default_dtype),
b = jnp.array([[ 25/216, 0, 1408/2565, 2197/4104, -1/5, 0 ],
[ 16/135, 0, 6656/12825, 28561/56430, -9/50, 2/55]], dtype=default_dtype),
c = jnp.array( [ 0, 1/4, 3/8, 12/13, 1, 1/2], dtype=default_dtype),
c_error = None,
)
bt_rk4_fehlberg = NamedTableau("RK45Fehlberg", bt_rk4_fehlberg)
bt_rk4_dopri = TableauRKExplicit(
order = (5,4),
a = jnp.array([[ 0, 0, 0, 0, 0, 0, 0 ],
[ 1/5, 0, 0, 0, 0, 0, 0 ],
[ 3/40, 9/40, 0, 0, 0, 0, 0 ],
[ 44/45, -56/15, 32/9, 0, 0, 0, 0 ],
[ 19372/6561, -25360/2187, 64448/6561, -212/729, 0, 0, 0 ],
[ 9017/3168, -355/33, 46732/5247, 49/176, -5103/18656, 0, 0 ],
[ 35/384, 0, 500/1113, 125/192, -2187/6784, 11/84, 0 ]], dtype=default_dtype),
b = jnp.array([[ 35/384, 0, 500/1113, 125/192, -2187/6784, 11/84, 0 ],
[ 5179/57600, 0, 7571/16695, 393/640, -92097/339200, 187/2100, 1/40 ]], dtype=default_dtype),
c = jnp.array( [ 0, 1/5, 3/10, 4/5, 8/9, 1, 1], dtype=default_dtype),
c_error = None,
)
bt_rk4_dopri = NamedTableau("RK45", bt_rk4_dopri)
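# Hypothetical usage sketch (not part of the original module): advance the
# ODE dy/dt = -y by one fixed step with the classical RK4 tableau above.
# The right-hand side must accept a `stage` keyword argument, because
# `_compute_slopes` calls it as `f(times[l], y_l, stage=l)`.
#
#   def f(t, y, stage=None):
#       return -y
#
#   y_next = bt_rk4.data.step(f, t=0.0, dt=0.1, y_t=jnp.array(1.0))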
# fmt: on | PypiClean |
/LAMDA-SSL-1.0.2.tar.gz/LAMDA-SSL-1.0.2/LAMDA_SSL/Config/FixMatch.py | from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Augmentation.Vision.RandAugment import RandAugment
from LAMDA_SSL.Augmentation.Vision.Cutout import Cutout
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from sklearn.pipeline import Pipeline
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
from LAMDA_SSL.Transform.Vision.Normalization import Normalization
from LAMDA_SSL.Transform.ToTensor import ToTensor
from LAMDA_SSL.Transform.ToImage import ToImage
mean = [0.4914, 0.4822, 0.4465]
std = [0.2471, 0.2435, 0.2616]
pre_transform = ToImage()
transforms = None
target_transform = None
transform = Pipeline([('ToTensor', ToTensor(dtype='float',image=True)),
('Normalization', Normalization(mean=mean, std=std))
])
unlabeled_transform = Pipeline([('ToTensor', ToTensor(dtype='float',image=True)),
('Normalization', Normalization(mean=mean, std=std))
])
test_transform = Pipeline([('ToTensor', ToTensor(dtype='float',image=True)),
('Normalization', Normalization(mean=mean, std=std))
])
valid_transform = Pipeline([('ToTensor', ToTensor(dtype='float',image=True)),
('Normalization', Normalization(mean=mean, std=std))
])
train_dataset=None
labeled_dataset=LabeledDataset(pre_transform=pre_transform,transforms=transforms,
transform=transform,target_transform=target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=pre_transform,transform=unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=pre_transform,transform=valid_transform)
test_dataset=UnlabeledDataset(pre_transform=pre_transform,transform=test_transform)
# Batch sampler
train_batch_sampler=None
labeled_batch_sampler=None
unlabeled_batch_sampler=None
valid_batch_sampler=None
test_batch_sampler=None
# sampler
train_sampler=None
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
#dataloader
train_dataloader=None
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# network
network=WideResNet(num_classes=10,depth=28,widen_factor=2,drop_rate=0)
# optimizer
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
# scheduler
scheduler=CosineAnnealingLR(eta_min=0,T_max=2**20)
# augmentation
weak_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
])
strong_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
('RandAugment',RandAugment(n=2,m=10,num_bins=10)),
('Cutout',Cutout(v=0.5,fill=(127, 127, 127))),
])
augmentation={
'weak_augmentation':weak_augmentation,
'strong_augmentation':strong_augmentation
}
# evaluation
evaluation={
'accuracy':Accuracy(),
'top_5_accuracy':Top_k_Accurary(k=5),
'precision':Precision(average='macro'),
'Recall':Recall(average='macro'),
'F1':F1(average='macro'),
'AUC':AUC(multi_class='ovo'),
'Confusion_matrix':Confusion_Matrix(normalize='true')
}
# model
weight_decay=5e-4
ema_decay=0.999
epoch=1
num_it_total=2**20
num_it_epoch=2**20
eval_epoch=None
eval_it=None
device='cpu'
parallel=None
file=None
verbose=False
threshold=0.95
lambda_u=1
T=0.5
mu=7 | PypiClean |
/Cantiz-PyChromecast-3.2.2.tar.gz/Cantiz-PyChromecast-3.2.2/pychromecast/dial.py | from collections import namedtuple
from uuid import UUID
import logging
import requests
from .discovery import get_info_from_service, get_host_from_service_info
XML_NS_UPNP_DEVICE = "{urn:schemas-upnp-org:device-1-0}"
FORMAT_BASE_URL = "http://{}:8008"
CC_SESSION = requests.Session()
CC_SESSION.headers['content-type'] = 'application/json'
# Regular chromecast, supports video/audio
CAST_TYPE_CHROMECAST = 'cast'
# Cast Audio device, supports only audio
CAST_TYPE_AUDIO = 'audio'
# Cast Audio group device, supports only audio
CAST_TYPE_GROUP = 'group'
CAST_TYPES = {
'chromecast': CAST_TYPE_CHROMECAST,
'eureka dongle': CAST_TYPE_CHROMECAST,
'chromecast audio': CAST_TYPE_AUDIO,
'google home': CAST_TYPE_AUDIO,
'google home mini': CAST_TYPE_AUDIO,
'google cast group': CAST_TYPE_GROUP,
}
_LOGGER = logging.getLogger(__name__)
def reboot(host):
""" Reboots the chromecast. """
CC_SESSION.post(FORMAT_BASE_URL.format(host) + "/setup/reboot",
data='{"params":"now"}', timeout=10)
def _get_status(host, services, zconf, path):
"""
:param host: Hostname or ip to fetch status from
:type host: str
:return: The parsed JSON response.
:rtype: dict
"""
if not host:
for service in services.copy():
service_info = get_info_from_service(service, zconf)
host, _ = get_host_from_service_info(service_info)
if host:
_LOGGER.debug("Resolved service %s to %s", service, host)
break
req = CC_SESSION.get(FORMAT_BASE_URL.format(host) + path, timeout=10)
req.raise_for_status()
# The Requests library will fall back to guessing the encoding in case
# no encoding is specified in the response headers - which is the case
# for the Chromecast.
# The standard mandates utf-8 encoding, let's fall back to that instead
# if no encoding is provided, since the autodetection does not always
# provide correct results.
if req.encoding is None:
req.encoding = 'utf-8'
return req.json()
def get_device_status(host, services=None, zconf=None):
"""
:param host: Hostname or ip to fetch status from
:type host: str
:return: The device status as a named tuple.
:rtype: pychromecast.dial.DeviceStatus or None
"""
try:
status = _get_status(
host, services, zconf, "/setup/eureka_info?options=detail")
friendly_name = status.get('name', "Unknown Chromecast")
model_name = "Unknown model name"
manufacturer = "Unknown manufacturer"
if 'detail' in status:
model_name = status['detail'].get('model_name', model_name)
manufacturer = status['detail'].get('manufacturer', manufacturer)
udn = status.get('ssdp_udn', None)
cast_type = CAST_TYPES.get(model_name.lower(),
CAST_TYPE_CHROMECAST)
uuid = None
if udn:
uuid = UUID(udn.replace('-', ''))
return DeviceStatus(friendly_name, model_name, manufacturer,
uuid, cast_type)
except (requests.exceptions.RequestException, OSError, ValueError):
return None
def get_multizone_status(host, services=None, zconf=None):
"""
:param host: Hostname or ip to fetch status from
:type host: str
:return: The multizone status as a named tuple.
:rtype: pychromecast.dial.MultizoneStatus or None
"""
try:
status = _get_status(
host, services, zconf, "/setup/eureka_info?params=multizone")
dynamic_groups = []
if 'multizone' in status and 'dynamic_groups' in status['multizone']:
for group in status['multizone']['dynamic_groups']:
name = group.get('name', "Unknown group name")
udn = group.get('uuid', None)
uuid = None
if udn:
uuid = UUID(udn.replace('-', ''))
dynamic_groups.append(MultizoneInfo(name, uuid))
groups = []
if 'multizone' in status and 'groups' in status['multizone']:
for group in status['multizone']['groups']:
name = group.get('name', "Unknown group name")
udn = group.get('uuid', None)
uuid = None
if udn:
uuid = UUID(udn.replace('-', ''))
groups.append(MultizoneInfo(name, uuid))
return MultizoneStatus(dynamic_groups, groups)
except (requests.exceptions.RequestException, OSError, ValueError):
return None
DeviceStatus = namedtuple(
"DeviceStatus",
["friendly_name", "model_name", "manufacturer", "uuid", "cast_type"])
MultizoneInfo = namedtuple(
"MultizoneInfo",
["friendly_name", "uuid"])
MultizoneStatus = namedtuple(
"MultizoneStatus",
["dynamic_groups", "groups"]) | PypiClean |
/django-chuck-0.2.3.tar.gz/django-chuck/modules/feincms/project/static/scripts/libs/tiny_mce/plugins/media/editor_plugin.js | (function(){var d=tinymce.explode("id,name,width,height,style,align,class,hspace,vspace,bgcolor,type"),h=tinymce.makeMap(d.join(",")),b=tinymce.html.Node,f,a,g=tinymce.util.JSON,e;f=[["Flash","d27cdb6e-ae6d-11cf-96b8-444553540000","application/x-shockwave-flash","http://download.macromedia.com/pub/shockwave/cabs/flash/swflash.cab#version=6,0,40,0"],["ShockWave","166b1bca-3f9c-11cf-8075-444553540000","application/x-director","http://download.macromedia.com/pub/shockwave/cabs/director/sw.cab#version=8,5,1,0"],["WindowsMedia","6bf52a52-394a-11d3-b153-00c04f79faa6,22d6f312-b0f6-11d0-94ab-0080c74c7e95,05589fa1-c356-11ce-bf01-00aa0055595a","application/x-mplayer2","http://activex.microsoft.com/activex/controls/mplayer/en/nsmp2inf.cab#Version=5,1,52,701"],["QuickTime","02bf25d5-8c17-4b23-bc80-d3488abddc6b","video/quicktime","http://www.apple.com/qtactivex/qtplugin.cab#version=6,0,2,0"],["RealMedia","cfcdaa03-8be4-11cf-b84b-0020afbbccfa","audio/x-pn-realaudio-plugin","http://download.macromedia.com/pub/shockwave/cabs/flash/swflash.cab#version=6,0,40,0"],["Java","8ad9c840-044e-11d1-b3e9-00805f499d93","application/x-java-applet","http://java.sun.com/products/plugin/autodl/jinstall-1_5_0-windows-i586.cab#Version=1,5,0,0"],["Silverlight","dfeaf541-f3e1-4c24-acac-99c30715084a","application/x-silverlight-2"],["Iframe"],["Video"],["EmbeddedAudio"],["Audio"]];function c(m){var l,j,k;if(m&&!m.splice){j=[];for(k=0;true;k++){if(m[k]){j[k]=m[k]}else{break}}return j}return m}tinymce.create("tinymce.plugins.MediaPlugin",{init:function(n,j){var r=this,l={},m,p,q,k;function o(i){return i&&i.nodeName==="IMG"&&n.dom.hasClass(i,"mceItemMedia")}r.editor=n;r.url=j;a="";for(m=0;m<f.length;m++){k=f[m][0];q={name:k,clsids:tinymce.explode(f[m][1]||""),mimes:tinymce.explode(f[m][2]||""),codebase:f[m][3]};for(p=0;p<q.clsids.length;p++){l["clsid:"+q.clsids[p]]=q}for(p=0;p<q.mimes.length;p++){l[q.mimes[p]]=q}l["mceItem"+k]=q;l[k.toLowerCase()]=q;a+=(a?"|":"")+k}tinymce.each(n.getParam("media_types","video=mp4,m4v,ogv,webm;silverlight=xap;flash=swf,flv;shockwave=dcr;quicktime=mov,qt,mpg,mpeg;shockwave=dcr;windowsmedia=avi,wmv,wm,asf,asx,wmx,wvx;realmedia=rm,ra,ram;java=jar;audio=mp3,ogg").split(";"),function(v){var s,u,t;v=v.split(/=/);u=tinymce.explode(v[1].toLowerCase());for(s=0;s<u.length;s++){t=l[v[0].toLowerCase()];if(t){l[u[s]]=t}}});a=new RegExp("write("+a+")\\(([^)]+)\\)");r.lookup=l;n.onPreInit.add(function(){n.schema.addValidElements("object[id|style|width|height|classid|codebase|*],param[name|value],embed[id|style|width|height|type|src|*],video[*],audio[*],source[*]");n.parser.addNodeFilter("object,embed,video,audio,script,iframe",function(s){var t=s.length;while(t--){r.objectToImg(s[t])}});n.serializer.addNodeFilter("img",function(s,u,t){var v=s.length,w;while(v--){w=s[v];if((w.attr("class")||"").indexOf("mceItemMedia")!==-1){r.imgToObject(w,t)}}})});n.onInit.add(function(){if(n.theme&&n.theme.onResolveName){n.theme.onResolveName.add(function(i,s){if(s.name==="img"&&n.dom.hasClass(s.node,"mceItemMedia")){s.name="media"}})}if(n&&n.plugins.contextmenu){n.plugins.contextmenu.onContextMenu.add(function(s,t,i){if(i.nodeName==="IMG"&&i.className.indexOf("mceItemMedia")!==-1){t.add({title:"media.edit",icon:"media",cmd:"mceMedia"})}})}});n.addCommand("mceMedia",function(){var s,i;i=n.selection.getNode();if(o(i)){s=n.dom.getAttrib(i,"data-mce-json");if(s){s=g.parse(s);tinymce.each(d,function(t){var 
u=n.dom.getAttrib(i,t);if(u){s[t]=u}});s.type=r.getType(i.className).name.toLowerCase()}}if(!s){s={type:"flash",video:{sources:[]},params:{}}}n.windowManager.open({file:j+"/media.htm",width:430+parseInt(n.getLang("media.delta_width",0)),height:500+parseInt(n.getLang("media.delta_height",0)),inline:1},{plugin_url:j,data:s})});n.addButton("media",{title:"media.desc",cmd:"mceMedia"});n.onNodeChange.add(function(s,i,t){i.setActive("media",o(t))})},convertUrl:function(k,n){var j=this,m=j.editor,l=m.settings,o=l.url_converter,i=l.url_converter_scope||j;if(!k){return k}if(n){return m.documentBaseURI.toAbsolute(k)}return o.call(i,k,"src","object")},getInfo:function(){return{longname:"Media",author:"Moxiecode Systems AB",authorurl:"http://tinymce.moxiecode.com",infourl:"http://wiki.moxiecode.com/index.php/TinyMCE:Plugins/media",version:tinymce.majorVersion+"."+tinymce.minorVersion}},dataToImg:function(m,k){var r=this,o=r.editor,p=o.documentBaseURI,j,q,n,l;m.params.src=r.convertUrl(m.params.src,k);q=m.video.attrs;if(q){q.src=r.convertUrl(q.src,k)}if(q){q.poster=r.convertUrl(q.poster,k)}j=c(m.video.sources);if(j){for(l=0;l<j.length;l++){j[l].src=r.convertUrl(j[l].src,k)}}n=r.editor.dom.create("img",{id:m.id,style:m.style,align:m.align,hspace:m.hspace,vspace:m.vspace,src:r.editor.theme.url+"/img/trans.gif","class":"mceItemMedia mceItem"+r.getType(m.type).name,"data-mce-json":g.serialize(m,"'")});n.width=m.width||(m.type=="audio"?"300":"320");n.height=m.height||(m.type=="audio"?"32":"240");return n},dataToHtml:function(i,j){return this.editor.serializer.serialize(this.dataToImg(i,j),{forced_root_block:"",force_absolute:j})},htmlToData:function(k){var j,i,l;l={type:"flash",video:{sources:[]},params:{}};j=this.editor.parser.parse(k);i=j.getAll("img")[0];if(i){l=g.parse(i.attr("data-mce-json"));l.type=this.getType(i.attr("class")).name.toLowerCase();tinymce.each(d,function(m){var n=i.attr(m);if(n){l[m]=n}})}return l},getType:function(m){var k,j,l;j=tinymce.explode(m," ");for(k=0;k<j.length;k++){l=this.lookup[j[k]];if(l){return l}}},imgToObject:function(z,o){var u=this,p=u.editor,C,H,j,t,I,y,G,w,k,E,s,q,A,D,m,x,l,B,F;function r(i,n){var M,L,N,K,J;J=p.getParam("flash_video_player_url",u.convertUrl(u.url+"/moxieplayer.swf"));if(J){M=p.documentBaseURI;G.params.src=J;if(p.getParam("flash_video_player_absvideourl",true)){i=M.toAbsolute(i||"",true);n=M.toAbsolute(n||"",true)}N="";L=p.getParam("flash_video_player_flashvars",{url:"$url",poster:"$poster"});tinymce.each(L,function(P,O){P=P.replace(/\$url/,i||"");P=P.replace(/\$poster/,n||"");if(P.length>0){N+=(N?"&":"")+O+"="+escape(P)}});if(N.length){G.params.flashvars=N}K=p.getParam("flash_video_player_params",{allowfullscreen:true,allowscriptaccess:true});tinymce.each(K,function(P,O){G.params[O]=""+P})}}G=z.attr("data-mce-json");if(!G){return}G=g.parse(G);q=this.getType(z.attr("class"));B=z.attr("data-mce-style");if(!B){B=z.attr("style");if(B){B=p.dom.serializeStyle(p.dom.parseStyle(B,"img"))}}if(q.name==="Iframe"){x=new b("iframe",1);tinymce.each(d,function(i){var n=z.attr(i);if(i=="class"&&n){n=n.replace(/mceItem.+ ?/g,"")}if(n&&n.length>0){x.attr(i,n)}});for(I in G.params){x.attr(I,G.params[I])}x.attr({style:B,src:G.params.src});z.replace(x);return}if(this.editor.settings.media_use_script){x=new b("script",1).attr("type","text/javascript");y=new b("#text",3);y.value="write"+q.name+"("+g.serialize(tinymce.extend(G.params,{width:z.attr("width"),height:z.attr("height")}))+");";x.append(y);z.replace(x);return}if(q.name==="Video"&&G.video.sources[0]){C=new 
b("video",1).attr(tinymce.extend({id:z.attr("id"),width:z.attr("width"),height:z.attr("height"),style:B},G.video.attrs));if(G.video.attrs){l=G.video.attrs.poster}k=G.video.sources=c(G.video.sources);for(A=0;A<k.length;A++){if(/\.mp4$/.test(k[A].src)){m=k[A].src}}if(!k[0].type){C.attr("src",k[0].src);k.splice(0,1)}for(A=0;A<k.length;A++){w=new b("source",1).attr(k[A]);w.shortEnded=true;C.append(w)}if(m){r(m,l);q=u.getType("flash")}else{G.params.src=""}}if(q.name==="Audio"&&G.video.sources[0]){F=new b("audio",1).attr(tinymce.extend({id:z.attr("id"),width:z.attr("width"),height:z.attr("height"),style:B},G.video.attrs));if(G.video.attrs){l=G.video.attrs.poster}k=G.video.sources=c(G.video.sources);if(!k[0].type){F.attr("src",k[0].src);k.splice(0,1)}for(A=0;A<k.length;A++){w=new b("source",1).attr(k[A]);w.shortEnded=true;F.append(w)}G.params.src=""}if(q.name==="EmbeddedAudio"){j=new b("embed",1);j.shortEnded=true;j.attr({id:z.attr("id"),width:z.attr("width"),height:z.attr("height"),style:B,type:z.attr("type")});for(I in G.params){j.attr(I,G.params[I])}tinymce.each(d,function(i){if(G[i]&&i!="type"){j.attr(i,G[i])}});G.params.src=""}if(G.params.src){if(/\.flv$/i.test(G.params.src)){r(G.params.src,"")}if(o&&o.force_absolute){G.params.src=p.documentBaseURI.toAbsolute(G.params.src)}H=new b("object",1).attr({id:z.attr("id"),width:z.attr("width"),height:z.attr("height"),style:B});tinymce.each(d,function(i){var n=G[i];if(i=="class"&&n){n=n.replace(/mceItem.+ ?/g,"")}if(n&&i!="type"){H.attr(i,n)}});for(I in G.params){s=new b("param",1);s.shortEnded=true;y=G.params[I];if(I==="src"&&q.name==="WindowsMedia"){I="url"}s.attr({name:I,value:y});H.append(s)}if(this.editor.getParam("media_strict",true)){H.attr({data:G.params.src,type:q.mimes[0]})}else{H.attr({classid:"clsid:"+q.clsids[0],codebase:q.codebase});j=new b("embed",1);j.shortEnded=true;j.attr({id:z.attr("id"),width:z.attr("width"),height:z.attr("height"),style:B,type:q.mimes[0]});for(I in G.params){j.attr(I,G.params[I])}tinymce.each(d,function(i){if(G[i]&&i!="type"){j.attr(i,G[i])}});H.append(j)}if(G.object_html){y=new b("#text",3);y.raw=true;y.value=G.object_html;H.append(y)}if(C){C.append(H)}}if(C){if(G.video_html){y=new b("#text",3);y.raw=true;y.value=G.video_html;C.append(y)}}if(F){if(G.video_html){y=new b("#text",3);y.raw=true;y.value=G.video_html;F.append(y)}}var v=C||F||H||j;if(v){z.replace(v)}else{z.remove()}},objectToImg:function(C){var L,k,F,s,M,N,y,A,x,G,E,t,q,I,B,l,K,o,H=this.lookup,m,z,v=this.editor.settings.url_converter,n=this.editor.settings.url_converter_scope,w,r,D,j;function u(i){return new tinymce.html.Serializer({inner:true,validate:false}).serialize(i)}function J(O,i){return H[(O.attr(i)||"").toLowerCase()]}function p(O){var i=O.replace(/^.*\.([^.]+)$/,"$1");return H[i.toLowerCase()||""]}if(!C.parent){return}if(C.name==="script"){if(C.firstChild){m=a.exec(C.firstChild.value)}if(!m){return}o=m[1];K={video:{},params:g.parse(m[2])};A=K.params.width;x=K.params.height}K=K||{video:{},params:{}};M=new b("img",1);M.attr({src:this.editor.theme.url+"/img/trans.gif"});N=C.name;if(N==="video"||N=="audio"){F=C;L=C.getAll("object")[0];k=C.getAll("embed")[0];A=F.attr("width");x=F.attr("height");y=F.attr("id");K.video={attrs:{},sources:[]};z=K.video.attrs;for(N in 
F.attributes.map){z[N]=F.attributes.map[N]}B=C.attr("src");if(B){K.video.sources.push({src:v.call(n,B,"src",C.name)})}l=F.getAll("source");for(E=0;E<l.length;E++){B=l[E].remove();K.video.sources.push({src:v.call(n,B.attr("src"),"src","source"),type:B.attr("type"),media:B.attr("media")})}if(z.poster){z.poster=v.call(n,z.poster,"poster",C.name)}}if(C.name==="object"){L=C;k=C.getAll("embed")[0]}if(C.name==="embed"){k=C}if(C.name==="iframe"){s=C;o="Iframe"}if(L){A=A||L.attr("width");x=x||L.attr("height");G=G||L.attr("style");y=y||L.attr("id");w=w||L.attr("hspace");r=r||L.attr("vspace");D=D||L.attr("align");j=j||L.attr("bgcolor");K.name=L.attr("name");I=L.getAll("param");for(E=0;E<I.length;E++){q=I[E];N=q.remove().attr("name");if(!h[N]){K.params[N]=q.attr("value")}}K.params.src=K.params.src||L.attr("data")}if(k){A=A||k.attr("width");x=x||k.attr("height");G=G||k.attr("style");y=y||k.attr("id");w=w||k.attr("hspace");r=r||k.attr("vspace");D=D||k.attr("align");j=j||k.attr("bgcolor");for(N in k.attributes.map){if(!h[N]&&!K.params[N]){K.params[N]=k.attributes.map[N]}}}if(s){A=s.attr("width");x=s.attr("height");G=G||s.attr("style");y=s.attr("id");w=s.attr("hspace");r=s.attr("vspace");D=s.attr("align");j=s.attr("bgcolor");tinymce.each(d,function(i){M.attr(i,s.attr(i))});for(N in s.attributes.map){if(!h[N]&&!K.params[N]){K.params[N]=s.attributes.map[N]}}}if(K.params.movie){K.params.src=K.params.src||K.params.movie;delete K.params.movie}if(K.params.src){K.params.src=v.call(n,K.params.src,"src","object")}if(F){if(C.name==="video"){o=H.video.name}else{if(C.name==="audio"){o=H.audio.name}}}if(L&&!o){o=(J(L,"clsid")||J(L,"classid")||J(L,"type")||{}).name}if(k&&!o){o=(J(k,"type")||p(K.params.src)||{}).name}if(k&&o=="EmbeddedAudio"){K.params.type=k.attr("type")}C.replace(M);if(k){k.remove()}if(L){t=u(L.remove());if(t){K.object_html=t}}if(F){t=u(F.remove());if(t){K.video_html=t}}K.hspace=w;K.vspace=r;K.align=D;K.bgcolor=j;M.attr({id:y,"class":"mceItemMedia mceItem"+(o||"Flash"),style:G,width:A||(C.name=="audio"?"300":"320"),height:x||(C.name=="audio"?"32":"240"),hspace:w,vspace:r,align:D,bgcolor:j,"data-mce-json":g.serialize(K,"'")})}});tinymce.PluginManager.add("media",tinymce.plugins.MediaPlugin)})(); | PypiClean |
/Brian2GeNN-1.7.0-py3-none-any.whl/brian2genn/sphinxext/docscrape_sphinx.py | import inspect
import pydoc
import re
import textwrap
from sphinx.pycode import ModuleAnalyzer
from .docscrape import ClassDoc, FunctionDoc, NumpyDocString
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
if config is None:
config = {}
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
@staticmethod
def _str_header(name, symbol="`"):
return [f".. rubric:: {name}", ""]
@staticmethod
def _str_field_list(name):
return [f":{name}:"]
@staticmethod
def _str_indent(doc, indent=4):
out = []
for line in doc:
out += [" " * indent + line]
return out
def _str_summary(self):
return self["Summary"] + [""]
def _str_extended_summary(self):
return self["Extended Summary"] + [""]
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += [""]
for param, param_type, desc in self[name]:
out += self._str_indent([f"**{param.strip()}** : {param_type}"])
out += [""]
out += self._str_indent(desc, 8)
out += [""]
return out
@property
def _obj(self):
if hasattr(self, "_cls"):
return self._cls
elif hasattr(self, "_f"):
return self._f
return None
def _str_member_list(self):
"""
Generate a member listing, autosummary:: table.
"""
out = []
for name in ["Attributes", "Methods"]:
if not self[name]:
continue
out += [f".. rubric:: {name}", ""]
prefix = getattr(self, "_name", "")
if prefix:
prefix = f"{prefix}."
autosum = []
for param, _, desc in self[name]:
param = param.strip()
if self._obj:
# Fake the attribute as a class property, but do not touch
# methods
if hasattr(self._obj, "__module__") and not (
hasattr(self._obj, param)
and callable(getattr(self._obj, param))
):
# Do not override directly provided docstrings
if not len("".join(desc).strip()):
analyzer = ModuleAnalyzer.for_module(self._obj.__module__)
desc = analyzer.find_attr_docs().get(
(self._obj.__name__, param), ""
)
# Only fake a property if we got a docstring
if len("".join(desc).strip()):
setattr(
self._obj,
param,
property(lambda self: None, doc="\n".join(desc)),
)
if len(prefix):
autosum += [f" ~{prefix}{param}"]
else:
autosum += [f" {param}"]
if autosum:
out += [".. autosummary::", ""]
out += autosum
out += [""]
return out
def _str_member_docs(self, name):
"""
Generate the full member autodocs
"""
out = []
if self[name]:
prefix = getattr(self, "_name", "")
if prefix:
prefix += "."
for param, _, _ in self[name]:
if name == "Methods":
out += [f".. automethod:: {prefix}{param}"]
elif name == "Attributes":
out += [f".. autoattribute:: {prefix}{param}"]
out += [""]
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += [""]
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += [""]
return out
def _str_see_also(self, func_role):
out = []
if self["See Also"]:
see_also = super()._str_see_also(func_role)
out = [".. seealso::", ""]
out += self._str_indent(see_also[2:])
return out
def _str_raises(self, name, func_role):
if not self[name]:
return []
out = []
out += self._str_header(name)
for func, _, desc in self[name]:
out += [f":exc:`{func}`"]
if desc:
out += self._str_indent([" ".join(desc)])
out += [""]
return out
def _str_warnings(self):
out = []
if self["Warnings"]:
out = [".. warning::", ""]
out += self._str_indent(self["Warnings"])
return out
def _str_index(self):
idx = self["index"]
out = []
if len(idx) == 0:
return out
out += [f".. index:: {idx.get('default', '')}"]
for section, references in idx.items():
if section == "default":
continue
elif section == "refguide":
out += [f" single: {', '.join(references)}"]
else:
out += [f" {section}: {','.join(references)}"]
return out
def _str_references(self):
out = []
if self["References"]:
out += self._str_header("References")
if isinstance(self["References"], str):
self["References"] = [self["References"]]
out.extend(self["References"])
out += [""]
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
out += [".. only:: latex", ""]
items = []
for line in self["References"]:
m = re.match(r".. \[([a-z0-9._-]+)\]", line, re.I)
if m:
items.append(m.group(1))
out += [f" {', '.join([f'[{item}]_' for item in items])}", ""]
return out
def _str_examples(self):
return self._str_section("Examples")
def __str__(self, indent=0, func_role="brianobj"):
out = []
out += self._str_index() + [""]
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ("Parameters", "Returns", "Other Parameters"):
out += self._str_param_list(param_list)
for param_list in ("Raises", "Warns"):
out += self._str_raises(param_list, func_role)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section("Notes")
out += self._str_references()
out += self._str_examples()
out += self._str_member_list()
if self["Attributes"] + self["Methods"]:
out += [".. rubric:: Details", ""]
for param_list in ("Attributes", "Methods"):
out += self._str_member_docs(param_list)
out = self._str_indent(out, indent)
return "\n".join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config=None):
if config is None:
config = {}
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, name=None, config=None):
if config is None:
config = {}
self.name = name
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
if config is None:
config = {}
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, name=None, config=None):
if config is None:
config = {}
if what is None:
if inspect.isclass(obj):
what = "class"
elif inspect.ismodule(obj):
what = "module"
elif callable(obj):
what = "function"
else:
what = "object"
if what == "class":
return SphinxClassDoc(
obj, func_doc=SphinxFunctionDoc, doc=doc, name=name, config=config
)
elif what in ("function", "method"):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config) | PypiClean |
/Dhelpers-0.1.5rc1.tar.gz/Dhelpers-0.1.5rc1/Dlib/Dpowers/events/trigman.py | from warnings import warn
from . import NamedKey, keyb, NamedButton
from ..winapps import pythoneditor
from .event_classes import StringAnalyzer, EventCombination, EventSequence, \
StringEvent
from .hookpower import HookAdaptor, CallbackHook, KeyhookBase, ButtonhookBase
from Dhelpers.launcher import launch
from Dhelpers.baseclasses import TimedObject, KeepInstanceRefs
from Dhelpers.arghandling import check_type
from Dhelpers.counting import dpress
import collections, inspect
from os import path
class PatternListener:
max_thread_num = 1
def __init__(self, buffer = 4):
self.eventdict = dict()
self.recent_events = collections.deque()
self.buffer = buffer
self.blocked_hks = []
self.stringevent_analyzer = StringAnalyzer(NamedKey, NamedButton)
self.reset_analyzefunc()
self.active_thread_num = 0
def event(self, k):
if not self.eventdict: return
# _print(k)
if self.active_thread_num >= self.max_thread_num: return
self.recent_events.append(k)
if len(self.recent_events) > self.buffer: self.recent_events.popleft()
recent_events = self.recent_events.copy()
for event, action in self.eventdict.items():
members = event.members
lm = len(members)
if lm > len(recent_events): continue
# if more members are required than actual events passed
for i in range(-1, -lm - 1, -1):
if members[i] != recent_events[i]: break
else:
launch.thread(self.runscript, action, event)
# this means that for each member event, the suiting recent
# event has been found
self.recent_events.clear()
def runscript(self, action, hk):
if self.active_blocks: return
if self.analyze_func:
if self.analyze_func(action, hk) is True: self.reset_analyzefunc()
return
assert self.active_thread_num >= 0
if self.active_thread_num >= self.max_thread_num: return
self.active_thread_num += 1
try:
# print(hk_func)
# print(hk,hk_func,type(hk_func))
if type(action) is str:
if action.startswith("[dkeys]"):
if dpress(hk, 0.15):
keyb.send(action[7:], delay=1)
else:
keyb.send(action)
elif callable(action):
# the following makes sure that the hk_func is accepting 1
# argument even if the underlying func does not
try:
return action(hk)
except TypeError:
pass
return action()
else:
raise TypeError
finally:
self.active_thread_num -= 1
def set_analyzefunc(self, func, timeout=5):
assert callable(func)
self.analyze_func = func
launch.thread(self.reset_analyzefunc, initial_time_delay=timeout)
def reset_analyzefunc(self):
self.analyze_func = None
def show_source_code(self, timeout=5):
self.set_analyzefunc(self._jump_to_action, timeout=timeout)
@staticmethod
def _jump_to_action(action, hk):
file = path.abspath(inspect.getsourcefile(action))
linenumber = inspect.getsourcelines(action)[-1]
pythoneditor.jump_to_line(file, linenumber)
return True
def add_event(self, event, action):
if event in self.eventdict:
raise ValueError(f"Tiggerevent {event} defined more than one time.")
self.eventdict[event] = action
def add_sequence(self, string, action):
event = self.stringevent_analyzer.create_from_str(
string).hotkey_version()
self.add_event(event, action)
def add_hotkey(self, string, action, rls=False, block=True):
event = self.stringevent_analyzer.create_from_str(string)
if isinstance(event, EventSequence): raise ValueError
if isinstance(event, StringEvent):
if not event.press: raise ValueError
if block: self.blocked_hks.append(event)
if rls: event += event.reverse()
elif isinstance(event, EventCombination):
event = event.hotkey_version(rls=rls)
else:
raise TypeError
self.add_event(event, action)
# A decorator
def sequence(self, *strings):
def decorator(decorated_func):
for string in strings: self.add_sequence(string, decorated_func)
return decorated_func
return decorator
# A decorator
def hotkey(self, *strings, rls=False, block=True):
def decorator(decorated_func):
for string in strings:
self.add_hotkey(string, decorated_func, rls=rls, block=block)
return decorated_func
return decorator
def hotkey_rls(self, *strings, block=True):
return self.hotkey(*strings, rls=True, block=block)
def add_triggerdict(self, triggerdict):
for eventstring, action in triggerdict.items():
self.add_sequence(eventstring, action)
active_blocks = 0
@classmethod
def block(cls):
cls.active_blocks += 1 # print(cls.active_blocks)
@classmethod
def unblock(cls, delay=None):
if delay:
launch.thread(cls._unblock, initial_time_delay=delay)
else:
cls._unblock()
@classmethod
def _unblock(cls):
cls.active_blocks -= 1
# print(cls.active_blocks)
if cls.active_blocks < 0: raise ValueError
@classmethod
def paused(cls, timeout=10):
return PauseObject(timeout, cls)
# this creates a PauseObject suitable for a with statement
# usage: with instance.paused(): or with TriggerManager.paused():
class RegisteredHook(PatternListener):
def __init__(self, buffer, hook_instance, container_triggerman=None):
super().__init__(buffer)
check_type(CallbackHook, hook_instance)
if isinstance(hook_instance, KeyhookBase):
self.stringevent_analyzer = NamedKey.Event
elif isinstance(hook_instance, ButtonhookBase):
self.stringevent_analyzer = NamedButton.Event
self.hook_instance = hook_instance(self.event)
self.triggerman_instance = container_triggerman
def event(self, k):
super().event(k)
if self.triggerman_instance: self.triggerman_instance.event(k)
def start(self):
if self.blocked_hks or self.triggerman_instance and \
self.triggerman_instance.blocked_hks:
try:
self.hook_instance = self.hook_instance(reinject_func =
self.reinject_func )
except NotImplementedError:
if self.blocked_hks: raise
return self.hook_instance.start()
def stop(self):
return self.hook_instance.stop()
def reinject_func(self, event_obj):
if event_obj in self.blocked_hks: return False
if self.triggerman_instance and event_obj in \
self.triggerman_instance.blocked_hks: return False
return True
@property
def analyze_func(self):
if self._analyze_func: return self._analyze_func
if self.triggerman_instance: return self.triggerman_instance.analyze_func
@analyze_func.setter
def analyze_func(self, val):
self._analyze_func = val
def reset_analyzefunc(self):
try:
if self._analyze_func is None:
if self.triggerman_instance and self.triggerman_instance.analyze_func:
self.triggerman_instance.analyze_func = None
except AttributeError:
pass
self.analyze_func = None
class TriggerManager(PatternListener,TimedObject, HookAdaptor.AdaptiveClass,
KeepInstanceRefs):
adaptor = HookAdaptor(group="triggerman",
_primary_name="TriggerManager.adaptor")
def __init__(self, timeout=60, buffer=4):
PatternListener.__init__(self,buffer=buffer)
TimedObject.__init__(self,timeout=timeout)
KeepInstanceRefs.__init__(self)
self.registered_hooks = []
self.was_started = False
self.max_thread_num = TriggerManager.max_thread_num
@classmethod
def start_all(cls):
for inst in cls.get_instances():
if inst.was_started is False: inst.start()
def add_hook(self, hook_instance, buffer=None,**hook_kwargs):
timeout = None if self.timeout is None else self.timeout + 5
hook_instance = hook_instance(timeout=timeout, **hook_kwargs)
buffer = self.buffer if buffer is None else buffer
new_registered_hook = RegisteredHook(buffer, hook_instance,
container_triggerman=self)
self.registered_hooks.append(new_registered_hook)
return new_registered_hook
def hook_keys(self, backend=None, **hook_kwargs):
if backend:
adaptor = self.adaptor_class(keys=backend)
else:
adaptor = self.adaptor
return self.add_hook(adaptor.keys(),**hook_kwargs)
def hook_buttons(self,backend=None,**hook_kwargs):
if backend:
adaptor = self.adaptor_class(buttons=backend)
else:
adaptor = self.adaptor
return self.add_hook(adaptor.buttons(), **hook_kwargs)
def hook_custom(self, backend=None,**hook_kwargs):
if backend:
adaptor = self.adaptor_class(custom=backend)
else:
adaptor = self.adaptor
return self.add_hook(adaptor.custom(), **hook_kwargs)
def _start_action(self):
self.was_started = True
for rhook in self.registered_hooks: rhook.start()
def _stop_action(self):
for rhook in self.registered_hooks:
try:
rhook.stop()
except Exception as e:
warn(e)
def _timeout_action(self):
warn("Timeout after %s seconds: Stopping TriggerManager %s." % (
self.timeout,self))
@property
def max_thread_num(self):
return self._max_thread_num
@max_thread_num.setter
def max_thread_num(self, val):
self._max_thread_num = val
for rhook in self.registered_hooks:
rhook.max_thread_num = val
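# Hypothetical usage sketch (not part of the original module). The hotkey
# string is illustrative only and must follow the key-naming scheme of the
# configured events backend:
#
#   triggers = TriggerManager(timeout=None)
#   triggers.hook_keys()
#
#   @triggers.hotkey("ctrl+alt+h")
#   def greet():
#       print("hotkey pressed")
#
#   triggers.start()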
class PauseObject(TimedObject):
def __init__(self, timeout, cls):
super().__init__(timeout=timeout)
self.cls = cls
def _start_action(self):
self.cls.block()
def _stop_action(self):
self.cls.unblock(delay=0.1)
# delaying the unblock is necessary because when the user types,
# the key up events are often caught without intention otherwise | PypiClean
/AxlNLP-0.0.1.tar.gz/AxlNLP-0.0.1/axlnlp/features/document_positions.py | from axlnlp.features.base import FeatureModel
import pandas as pd
import numpy as np
class DocPos(FeatureModel):
"""
Creates a vector representing a sentences position in the text.
inital_texts are needed to create this feature
feature is specifically implemented for replication of Joint Pointer NN from :
https://arxiv.org/pdf/1612.08994.pdf
"(3) Structural features: Whether or not the AC is the first AC in a paragraph,
and Whether the AC is in an opening, body, or closing paragraph."
"""
def __init__(self, dataset):
self._dataset = dataset
self._name = "DocPos"
self._feature_dim = 2
@property
def dataset(self):
return self._dataset
# @property
# def trainable(self):
# return False
def __doc_pos(self, row, doc2paralen, step_level):
vec = [0]*2
doc_id = row["document_id"]
row_id = row["id"]
if row[f"p_{step_level}_id"] == 0:
vec[0]= 1
if row[f"d_paragraph_id"]== 0:
pass
elif row[f"d_paragraph_id"] == doc2paralen[doc_id]-1:
vec[1] = 2
else:
vec[1] = 1
return {"vec":np.array(vec),"document_id":doc_id, f"{step_level}_id":row_id}
def extract(self, sample_ids:list=None, pad=True):
"""
# 1) is the first AC in a paragaraph
# 2) is is in first, body, or last paragraph
FUNC()
# We can get 1 by checking the local id of each ac and its paragraph_id, if both
# we just need to know hte nr of paragaraphs in each document,
# then we can make conditions
# ac_para_id == 0 == FIRST
# ac_para_id == nr_para == LAST
# else: BODY
# feature representation
alt 1:
one hot encodig where dimN == {0,1}, dim size = 4
dim0 = 1 if item is first in sample 0 if not
dim1 = 1 if item is in first paragraph if not
dim2 = 1 if item is in body 0 if not
dim3 = 1 if item is in last paragraph 0 if not
alt 2:
one hot encodig where dim0 == {0,1}
dim0 = 1 if item is first in sample 0 if not
and dim1 = {0,1,2}
dim1 = 0 if item in first paragrap, 1 if in body, 2 if in last paragraph
"""
step_level = self.dataset.prediction_level
sample_level = self.dataset.sample_level
if sample_level != "document":
raise ValueError("Sample level needs to be 'document' for this feature")
# we will look at the position of the prediction level items
df = self.dataset.level_dfs[step_level]
# filtering if we are given sample ids else we take everything on sample level
if sample_ids:
sample_level_id = f"{sample_level}_id"
df = df[df[sample_level_id].isin(sample_ids)]
nr_ids = len(sample_ids)
if nr_ids > 1:
pad = True
else:
nr_ids = max(self.dataset.level_dfs[sample_level]["id"].to_numpy()) + 1
if pad:
max_sample_len = self.dataset.max_sample_length
output = np.zeros((nr_ids, max_sample_len, 2))
#create a dict of document 2 number of paragraphs
para_groups = self.dataset.level_dfs["paragraph"].groupby("document_id", sort=False)
doc2paralen = {i:g.shape[0] for i,g in para_groups}
args = [doc2paralen, step_level]
vec_rows = list(df.apply(self.__doc_pos, axis=1, args=args).to_numpy())
new_df = pd.DataFrame(vec_rows)
f = lambda x: x["vec"]
test = new_df.groupby("document_id")["vec"] #.apply(f)
for i, (doc_i,g) in enumerate(test):
output[i][:g.shape[0]] = np.stack(g.to_numpy())
return output | PypiClean |
/ConSav-0.11.tar.gz/ConSav-0.11/consav/linear_interp_4d.py | import numpy as np
from numba import njit, boolean, int32, double, void
from .linear_interp import binary_search
@njit(double(double[:],double[:],double[:],double[:],double[:,:,:,:],double,double,double,double,int32,int32,int32,int32),fastmath=True)
def _interp_4d(grid1,grid2,grid3,grid4,value,xi1,xi2,xi3,xi4,j1,j2,j3,j4):
""" 4d interpolation for one point with known location
Args:
grid1 (numpy.ndarray): 1d grid
grid2 (numpy.ndarray): 1d grid
grid3 (numpy.ndarray): 1d grid
grid4 (numpy.ndarray): 1d grid
value (numpy.ndarray): value array (4d)
xi1 (double): input point
xi2 (double): input point
xi3 (double): input point
xi4 (double): input point
j1 (int): location in grid
j2 (int): location in grid
j3 (int): location in grid
j4 (int): location in grid
Returns:
yi (double): output
"""
# a. left/right
nom_1_left = grid1[j1+1]-xi1
nom_1_right = xi1-grid1[j1]
nom_2_left = grid2[j2+1]-xi2
nom_2_right = xi2-grid2[j2]
nom_3_left = grid3[j3+1]-xi3
nom_3_right = xi3-grid3[j3]
nom_4_left = grid4[j4+1]-xi4
nom_4_right = xi4-grid4[j4]
# b. interpolation
denom = (grid1[j1+1]-grid1[j1])*(grid2[j2+1]-grid2[j2])*(grid3[j3+1]-grid3[j3])*(grid4[j4+1]-grid4[j4])
nom = 0
for k1 in range(2):
nom_1 = nom_1_left if k1 == 0 else nom_1_right
for k2 in range(2):
nom_2 = nom_2_left if k2 == 0 else nom_2_right
for k3 in range(2):
nom_3 = nom_3_left if k3 == 0 else nom_3_right
for k4 in range(2):
nom_4 = nom_4_left if k4 == 0 else nom_4_right
nom += nom_1*nom_2*nom_3*nom_4*value[j1+k1,j2+k2,j3+k3,j4+k4]
return nom/denom
@njit(double(double[:],double[:],double[:],double[:],double[:,:,:,:],double,double,double,double),fastmath=True)
def interp_4d(grid1,grid2,grid3,grid4,value,xi1,xi2,xi3,xi4):
""" 4d interpolation for one point
Args:
grid1 (numpy.ndarray): 1d grid
grid2 (numpy.ndarray): 1d grid
grid3 (numpy.ndarray): 1d grid
grid4 (numpy.ndarray): 1d grid
value (numpy.ndarray): value array (4d)
xi1 (double): input point
xi2 (double): input point
xi3 (double): input point
xi4 (double): input point
Returns:
yi (double): output
"""
# a. search in each dimension
j1 = binary_search(0,grid1.size,grid1,xi1)
j2 = binary_search(0,grid2.size,grid2,xi2)
j3 = binary_search(0,grid3.size,grid3,xi3)
j4 = binary_search(0,grid4.size,grid4,xi4)
return _interp_4d(grid1,grid2,grid3,grid4,value,xi1,xi2,xi3,xi4,j1,j2,j3,j4) | PypiClean |
/KeyHarbor-0.0.1.tar.gz/KeyHarbor-0.0.1/keyharbor/client.py | import hashlib
import json
import io
import time
import requests
from werkzeug.datastructures import MultiDict
from . import exceptions
__all__ = ['Client']
class APIClient:
"""
A client for the KeyHarbor API.
"""
def __init__(self,
account_id,
api_key,
api_base_url='https://api.keyharbor.io',
timeout=None
):
# The Id of the KeyHarbor account the API key relates to
self._account_id = account_id
# A key used to authenticate API calls to an account
self._api_key = api_key
# The base URL to use when calling the API
self._api_base_url = api_base_url
# The period of time before requests to the API should timeout
self._timeout = timeout
# NOTE: Rate limiting information is only available after a request
# has been made.
# The maximum number of requests per second that can be made with the
# given API key.
self._rate_limit = None
# The time (seconds since epoch) when the current rate limit will
# reset.
self._rate_limit_reset = None
# The number of requests remaining within the current limit before the
# next reset.
self._rate_limit_remaining = None
@property
def rate_limit(self):
return self._rate_limit
@property
def rate_limit_reset(self):
return self._rate_limit_reset
@property
def rate_limit_remaining(self):
return self._rate_limit_remaining
def __call__(
self,
method,
path,
params=None,
data=None,
totp=None
):
"""Call the API"""
# Filter out params/data set to `None` and ensure all arguments are
# converted to strings.
if params:
params = {
k: _ensure_string(v)
for k, v in params.items() if v is not None
}
if data:
data = {
k: _ensure_string(v)
for k, v in data.items() if v is not None
}
# Build the signature
signature_data = MultiDict(params if method.lower() == 'get' else data)\
.to_dict(False)
signature_values = []
for key, value in signature_data.items():
signature_values.append(key)
if isinstance(value, list):
signature_values += value
else:
signature_values.append(value)
signature_body = ''.join(signature_values)
timestamp = str(time.time())
signature = hashlib.sha1()
signature.update(
''.join([
timestamp,
signature_body,
self._account_id
]).encode('utf8')
)
signature = signature.hexdigest()
# Build headers
headers = {
'Accept': 'application/json',
'X-KeyHarbor-AccountId': self._account_id,
'X-KeyHarbor-APIKey': self._api_key,
'X-KeyHarbor-Signature': signature,
'X-KeyHarbor-Timestamp': timestamp
}
if totp:
headers['X-KeyHarbor-TOTP'] = totp
# Make the request
r = getattr(requests, method.lower())(
f'{self._api_base_url}/v1/{path}',
headers=headers,
params=params,
data=data,
timeout=self._timeout
)
# Update the rate limit
if 'X-KeyHarbor-RateLimit-Limit' in r.headers:
self._rate_limit = int(r.headers['X-KeyHarbor-RateLimit-Limit'])
self._rate_limit_reset \
= float(r.headers['X-KeyHarbor-RateLimit-Reset'])
self._rate_limit_remaining \
= int(r.headers['X-KeyHarbor-RateLimit-Remaining'])
# Handle a successful response
if r.status_code in [200, 204]:
return r.json()
# Raise an error related to the response
try:
error = r.json()
except ValueError:
error = {}
error_cls = exceptions.APIException.get_class_by_status_code(
r.status_code
)
raise error_cls(
r.status_code,
error.get('hint'),
error.get('arg_errors')
)
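# Hypothetical usage sketch (not part of the original module); the endpoint
# path and parameters are illustrative only:
#
#   client = APIClient(account_id="...", api_key="...")
#   response = client("GET", "some-endpoint", params={"page": 1})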
# Utils
def _ensure_string(v):
"""
Ensure values that will be converted to a form-encoded value are strings
(or lists of strings).
"""
if isinstance(v, (list, tuple)):
return list([str(i) for i in v])
return str(v) | PypiClean |
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/crm/api/sync_status_api.py | import re # noqa: F401
import sys # noqa: F401
from MergePythonSDK.shared.api_client import ApiClient, Endpoint as _Endpoint
from MergePythonSDK.shared.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from MergePythonSDK.shared.model.merge_paginated_response import MergePaginatedResponse
from MergePythonSDK.crm.model.sync_status import SyncStatus
class SyncStatusApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
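        # Endpoint definition for GET /crm/v1/sync-status: declares the
        # paginated SyncStatus response type, the accepted auth schemes
        # (account token and bearer) and the optional `cursor` and `page_size`
        # query parameters with their types and locations.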
self.sync_status_list_endpoint = _Endpoint(
settings={
'response_type': (MergePaginatedResponse(SyncStatus),),
'auth': [
'accountTokenAuth',
'bearerAuth'
],
'endpoint_path': '/crm/v1/sync-status',
'operation_id': 'sync_status_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'cursor',
'page_size',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'cursor':
(str,),
'page_size':
(int,),
},
'attribute_map': {
'cursor': 'cursor',
'page_size': 'page_size',
},
'location_map': {
'cursor': 'query',
'page_size': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
def sync_status_list(
self,
**kwargs
) -> "MergePaginatedResponse(SyncStatus)":
"""sync_status_list # noqa: E501
Get syncing status. Possible values: `DISABLED`, `DONE`, `FAILED`, `PAUSED`, `SYNCING` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.sync_status_list(async_req=True)
>>> result = thread.get()
Keyword Args:
            cursor (str): The pagination cursor value. [optional]
            page_size (int): Number of results to return per page. [optional]
            _return_http_data_only (bool): response data returned without the
                HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
MergePaginatedResponse(SyncStatus)
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.sync_status_list_endpoint.call_with_http_info(**kwargs) | PypiClean |
/HMOBSTER-0.0.44.tar.gz/HMOBSTER-0.0.44/mobster/BoundedPareto.py | from pyro.distributions.torch_distribution import TorchDistribution
from pyro.distributions import constraints
from numbers import Number
import torch
# class BoundedPareto(Rejector):
# def __init__(self, scale, alpha, upper_limit, validate_args=False):
# propose = Pareto(scale, alpha, validate_args=validate_args)
#
# def log_prob_accept(x):
# return (x < upper_limit).type_as(x).log()
#
# #log_scale = torch.Tensor(alpha) * torch.log(torch.Tensor([scale / upper_limit]))
# log_scale = torch.log(Pareto(scale, alpha).cdf(upper_limit))
# super(BoundedPareto, self).__init__(propose, log_prob_accept, log_scale)
class BoundedPareto(TorchDistribution):
has_rsample = True
arg_constraints = {"scale": constraints.positive, "alpha": constraints.positive,
"upper_limit" : constraints.positive}
def __init__(self, scale, alpha, upper_limit, validate_args=False):
self.scale = scale
self.alpha = alpha
self.upper_lim = upper_limit
if isinstance(scale, Number) and isinstance(alpha, Number) and isinstance(upper_limit, Number):
batch_shape = torch.Size()
else:
batch_shape = self.alpha.size()
super(BoundedPareto, self).__init__(batch_shape, validate_args=validate_args)
def ppf(self, value):
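        # Percent point function (inverse CDF) of a Pareto distribution
        # truncated to [scale, upper_lim]: with L = scale, H = upper_lim and
        # a = alpha,
        #   ppf(u) = ( -(u*H^a - u*L^a - H^a) / (H^a * L^a) )**(-1/a)
        # which maps u in [0, 1] onto the support [L, H].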
Ha = self.upper_lim**self.alpha
La = self.scale**self.alpha
num = -1 * ( value * Ha - value * La - Ha )
dem = Ha * La
return ( (num / dem)**(-1 / self.alpha) )
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
rand = torch.rand(shape, dtype=self.alpha.dtype, device=self.alpha.device)
return self.ppf(rand)
def log_prob(self, value):
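        # Log-density of the bounded (truncated) Pareto distribution:
        #   f(x) = a * L^a * x^(-a-1) / (1 - (L/H)^a)  for L < x < H
        # The mask term is log(1) = 0 inside the support and log(0) = -inf
        # outside, so out-of-support values get probability zero.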
mask = torch.logical_and((value < self.upper_lim),(value > self.scale)).type_as(value).log()
num = self.alpha * self.scale**self.alpha * value**(-self.alpha - 1)
den = 1 - (self.scale / self.upper_lim)**(self.alpha)
return torch.log(num/den) + mask | PypiClean |
/OWSLib-0.29.2.tar.gz/OWSLib-0.29.2/owslib/coverage/wcs200.py |
# !!! NOTE: Does not conform to new interfaces yet #################
from owslib.coverage.wcsBase import WCSBase, WCSCapabilitiesReader, ServiceException
from owslib.ows import (
OwsCommon,
ServiceIdentification,
ServiceProvider,
OperationsMetadata,
)
from urllib.parse import urlencode
from owslib.util import openURL, testXMLValue
from owslib.etree import etree
from owslib.crs import Crs
import os
import errno
import dateutil.parser as parser
from datetime import timedelta
import logging
from owslib.util import log, datetime_from_ansi, datetime_from_iso, param_list_to_url_string
# function to save writing out WCS namespace in full each time
def ns(tag):
return "{http://www.opengis.net/ows/2.0}" + tag
def nsWCS2(tag):
return "{http://www.opengis.net/wcs/2.0}" + tag
class WebCoverageService_2_0_0(WCSBase):
"""Abstraction for OGC Web Coverage Service (WCS), version 2.0.0
Implements IWebCoverageService.
"""
def __getitem__(self, name):
""" check contents dictionary to allow dict like access to service layers"""
if name in list(self.__getattribute__("contents").keys()):
return self.__getattribute__("contents")[name]
else:
raise KeyError("No content named %s" % name)
def __init__(self, url, xml, cookies, auth=None, timeout=30, headers=None):
super(WebCoverageService_2_0_0, self).__init__(auth=auth, timeout=timeout, headers=headers)
self.version = "2.0.0"
self.url = url
self.cookies = cookies
self.timeout = timeout
self.ows_common = OwsCommon(version="2.0.0")
# initialize from saved capability document or access the server
reader = WCSCapabilitiesReader(self.version, self.cookies, self.auth, headers=self.headers)
if xml:
self._capabilities = reader.readString(xml)
else:
self._capabilities = reader.read(self.url, self.timeout)
# check for exceptions
se = self._capabilities.find("ServiceException")
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message, xml)
# serviceIdentification metadata
subelem = self._capabilities.find(ns("ServiceIdentification"))
self.identification = ServiceIdentification(
subelem, namespace=self.ows_common.namespace
)
# serviceProvider metadata
serviceproviderelem = self._capabilities.find(ns("ServiceProvider"))
self.provider = ServiceProvider(
serviceproviderelem, namespace=self.ows_common.namespace
)
# serviceOperations metadata
self.operations = []
for elem in self._capabilities.find(ns("OperationsMetadata"))[:]:
if elem.tag != ns("ExtendedCapabilities"):
self.operations.append(
OperationsMetadata(elem, namespace=self.ows_common.namespace)
)
# serviceContents metadata
self.contents = {}
for elem in self._capabilities.findall(
nsWCS2("Contents/") + nsWCS2("CoverageSummary")
):
cm = ContentMetadata(elem, self)
self.contents[cm.id] = cm
# exceptions
self.exceptions = [
f.text for f in self._capabilities.findall("Capability/Exception/Format")
]
def items(self):
"""supports dict-like items() access"""
items = []
for item in self.contents:
items.append((item, self.contents[item]))
return items
def getCoverage(
self,
identifier=None,
bbox=None,
time=None,
format=None,
subsets=None,
resolutions=None,
sizes=None,
crs=None,
width=None,
height=None,
resx=None,
resy=None,
resz=None,
parameter=None,
method="Get",
timeout=30,
**kwargs
):
"""Request and return a coverage from the WCS as a file-like object
note: additional **kwargs helps with multi-version implementation
core keyword arguments should be supported cross version
example:
cvg=wcs.getCoverage(identifier=['TuMYrRQ4'], timeSequence=['2792-06-01T00:00:00.0'], bbox=(-112,36,-106,41),
format='cf-netcdf')
is equivalent to:
http://myhost/mywcs?SERVICE=WCS&REQUEST=GetCoverage&IDENTIFIER=TuMYrRQ4&VERSION=1.1.0&BOUNDINGBOX=-180,-90,180,90&TIME=2792-06-01T00:00:00.0&FORMAT=cf-netcdf
example 2.0.1 URL
http://earthserver.pml.ac.uk/rasdaman/ows?&SERVICE=WCS&VERSION=2.0.1&REQUEST=GetCoverage
&COVERAGEID=V2_monthly_CCI_chlor_a_insitu_test&SUBSET=Lat(40,50)&SUBSET=Long(-10,0)&SUBSET=ansi(144883,145000)&FORMAT=application/netcdf
cvg=wcs.getCoverage(identifier=['myID'], format='application/netcdf', subsets=[('axisName',min,max),
('axisName', min, max),('axisName',min,max)])
"""
log.debug(
"WCS 2.0.0 DEBUG: Parameters passed to GetCoverage: identifier=%s, bbox=%s, time=%s, format=%s, crs=%s, width=%s, height=%s, resx=%s, resy=%s, resz=%s, parameter=%s, method=%s, other_arguments=%s" # noqa
% (
identifier,
bbox,
time,
format,
crs,
width,
height,
resx,
resy,
resz,
parameter,
method,
str(kwargs),
)
)
try:
base_url = next(
(
m.get("url")
for m in self.getOperationByName("GetCoverage").methods
if m.get("type").lower() == method.lower()
)
)
except StopIteration:
base_url = self.url
log.debug("WCS 2.0.0 DEBUG: base url of server: %s" % base_url)
request = {"version": self.version, "request": "GetCoverage", "service": "WCS"}
assert len(identifier) > 0
request["CoverageID"] = identifier[0]
if crs:
request["crs"] = crs
request["format"] = format
if width:
request["width"] = width
if height:
request["height"] = height
# anything else e.g. vendor specific parameters must go through kwargs
if kwargs:
for kw in kwargs:
request[kw] = kwargs[kw]
# encode and request
data = urlencode(request)
if subsets:
data += param_list_to_url_string(subsets, 'subset')
if resolutions:
log.debug('Adding vendor-specific RESOLUTION parameter.')
data += param_list_to_url_string(resolutions, 'resolution')
if sizes:
log.debug('Adding vendor-specific SIZE parameter.')
data += param_list_to_url_string(sizes, 'size')
log.debug("WCS 2.0.0 DEBUG: Second part of URL: %s" % data)
u = openURL(base_url, data, method, self.cookies, auth=self.auth, timeout=timeout, headers=self.headers)
return u
def getOperationByName(self, name):
"""Return a named operation item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
class ContentMetadata(object):
"""
Implements IContentMetadata
"""
def __init__(self, elem, service):
"""Initialize. service is required so that describeCoverage requests may be made"""
# TODO - examine the parent for bounding box info.
self._elem = elem
self._service = service
self.id = elem.find(nsWCS2("CoverageId")).text
self.title = testXMLValue(elem.find(ns("label")))
self.abstract = testXMLValue(elem.find(ns("description")))
self.keywords = [
f.text for f in elem.findall(ns("keywords") + "/" + ns("keyword"))
]
self.boundingBox = None # needed for iContentMetadata harmonisation
self.boundingBoxWGS84 = None
b = elem.find(ns("lonLatEnvelope"))
if b is not None:
gmlpositions = b.findall("{http://www.opengis.net/gml}pos")
lc = gmlpositions[0].text
uc = gmlpositions[1].text
self.boundingBoxWGS84 = (
float(lc.split()[0]),
float(lc.split()[1]),
float(uc.split()[0]),
float(uc.split()[1]),
)
# others not used but needed for iContentMetadata harmonisation
self.styles = None
self.crsOptions = None
self.defaulttimeposition = None
# grid is either a gml:Grid or a gml:RectifiedGrid if supplied as part of the DescribeCoverage response.
def _getGrid(self):
if not hasattr(self, "descCov"):
self.descCov = self._service.getDescribeCoverage(self.id)
gridelem = self.descCov.find(
nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}domainSet/" + "{http://www.opengis.net/gml/3.3/rgrid}ReferenceableGridByVectors" # noqa
)
if gridelem is not None:
grid = ReferenceableGridByVectors(gridelem)
else:
# HERE I LOOK FOR RECTIFIEDGRID
gridelem = self.descCov.find(
nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}domainSet/" + "{http://www.opengis.net/gml/3.2}RectifiedGrid" # noqa
)
grid = RectifiedGrid(gridelem)
return grid
grid = property(_getGrid, None)
# timelimits are the start/end times, timepositions are all timepoints. WCS servers can declare one or both
# or neither of these.
# in wcs 2.0 this can be gathered from the Envelope tag
def _getTimeLimits(self):
# timepoints, timelimits=[],[]
# b=self._elem.find(ns('lonLatEnvelope'))
# if b is not None:
# timepoints=b.findall('{http://www.opengis.net/gml}timePosition')
# else:
# #have to make a describeCoverage request...
# if not hasattr(self, 'descCov'):
# self.descCov=self._service.getDescribeCoverage(self.id)
# for pos in self.descCov.findall(
# ns('CoverageOffering/')+ns('domainSet/')+ns('temporalDomain/')+'{http://www.opengis.net/gml}timePosition'):
# timepoints.append(pos)
# if timepoints:
# timelimits=[timepoints[0].text,timepoints[1].text]
return [self.timepositions[0], self.timepositions[-1]]
timelimits = property(_getTimeLimits, None)
def _getTimePositions(self):
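        # Collect the coverage's time positions. For a
        # ReferenceableGridByVectors the "ansi"/"unix" axis coefficients are
        # parsed directly; for a RectifiedGrid the positions are reconstructed
        # from the grid origin, the time offset vector (step in days) and the
        # number of grid steps; None is returned when there is no time axis.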
timepositions = []
if not hasattr(self, "descCov"):
self.descCov = self._service.getDescribeCoverage(self.id)
gridelem = self.descCov.find(
nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}domainSet/" + "{http://www.opengis.net/gml/3.3/rgrid}ReferenceableGridByVectors" # noqa
)
if gridelem is not None:
# irregular time axis
            coefficients = []
grid_axes = gridelem.findall(
"{http://www.opengis.net/gml/3.3/rgrid}generalGridAxis"
)
for elem in grid_axes:
if elem.find(
"{http://www.opengis.net/gml/3.3/rgrid}GeneralGridAxis/{http://www.opengis.net/gml/3.3/rgrid}gridAxesSpanned" # noqa
).text in ["ansi", "unix"]:
                    coefficients = elem.find(
"{http://www.opengis.net/gml/3.3/rgrid}GeneralGridAxis/{http://www.opengis.net/gml/3.3/rgrid}coefficients" # noqa
).text.split(" ")
            for x in coefficients:
x = x.replace('"', "")
t_date = datetime_from_iso(x)
timepositions.append(t_date)
else:
# regular time
if len(self.grid.origin) > 2:
t_grid = self.grid
t_date = t_grid.origin[2]
start_pos = parser.parse(t_date, fuzzy=True)
step = float(t_grid.offsetvectors[2][2])
start_pos = start_pos + timedelta(days=(step / 2))
no_steps = int(t_grid.highlimits[2])
for x in range(no_steps):
t_pos = start_pos + timedelta(days=(step * x))
# t_date = datetime_from_ansi(t_pos)
# t_date = t_pos.isoformat()
timepositions.append(t_pos)
else:
# no time axis
timepositions = None
return timepositions
timepositions = property(_getTimePositions, None)
def _getOtherBoundingBoxes(self):
""" incomplete, should return other bounding boxes not in WGS84
#TODO: find any other bounding boxes. Need to check for gml:EnvelopeWithTimePeriod."""
bboxes = []
if not hasattr(self, "descCov"):
self.descCov = self._service.getDescribeCoverage(self.id)
for envelope in self.descCov.findall(
nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}boundedBy/" + "{http://www.opengis.net/gml/3.2}Envelope" # noqa
):
bbox = {}
bbox["nativeSrs"] = envelope.attrib["srsName"]
lc = envelope.find("{http://www.opengis.net/gml/3.2}lowerCorner")
lc = lc.text.split()
uc = envelope.find("{http://www.opengis.net/gml/3.2}upperCorner")
uc = uc.text.split()
bbox["bbox"] = (float(lc[0]), float(lc[1]), float(uc[0]), float(uc[1]))
bboxes.append(bbox)
return bboxes
boundingboxes = property(_getOtherBoundingBoxes, None)
def _getSupportedCRSProperty(self):
# gets supported crs info
crss = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns("CoverageOffering/") + ns("supportedCRSs/") + ns("responseCRSs")
):
for crs in elem.text.split(" "):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(
ns("CoverageOffering/") + ns("supportedCRSs/") + ns("requestResponseCRSs")
):
for crs in elem.text.split(" "):
crss.append(Crs(crs))
for elem in self._service.getDescribeCoverage(self.id).findall(
ns("CoverageOffering/") + ns("supportedCRSs/") + ns("nativeCRSs")
):
for crs in elem.text.split(" "):
crss.append(Crs(crs))
return crss
supportedCRS = property(_getSupportedCRSProperty, None)
def _getSupportedFormatsProperty(self):
# gets supported formats info
frmts = []
for elem in self._service._capabilities.findall(
nsWCS2("ServiceMetadata/") + nsWCS2("formatSupported")
):
frmts.append(elem.text)
return frmts
supportedFormats = property(_getSupportedFormatsProperty, None)
def _getAxisDescriptionsProperty(self):
# gets any axis descriptions contained in the rangeset (requires a DescribeCoverage call to server).
axisDescs = []
for elem in self._service.getDescribeCoverage(self.id).findall(
ns("CoverageOffering/") + ns("rangeSet/") + ns("RangeSet/") + ns("axisDescription/") + ns("AxisDescription")
):
axisDescs.append(
AxisDescription(elem)
) # create a 'AxisDescription' object.
return axisDescs
axisDescriptions = property(_getAxisDescriptionsProperty, None)
# Adding classes to represent gml:grid and gml:rectifiedgrid. One of these is used for the cvg.grid property
# (where cvg is a member of the contents dictionary)
# There is no simple way to convert the offset values in a RectifiedGrid to real values without CRS understanding,
# therefore this is beyond the current scope of owslib, so the representation here is purely to provide access
# to the information in the GML.
class Grid(object):
""" Simple grid class to provide axis and value information for a gml grid """
def __init__(self, grid):
self.axislabels = []
self.dimension = None
self.lowlimits = []
self.highlimits = []
if grid is not None:
self.dimension = int(grid.get("dimension"))
self.lowlimits = grid.find(
"{http://www.opengis.net/gml/3.2}limits/{http://www.opengis.net/gml/3.2}GridEnvelope/{http://www.opengis.net/gml/3.2}low" # noqa
).text.split(" ")
self.highlimits = grid.find(
"{http://www.opengis.net/gml/3.2}limits/{http://www.opengis.net/gml/3.2}GridEnvelope/{http://www.opengis.net/gml/3.2}high" # noqa
).text.split(" ")
for axis in grid.findall("{http://www.opengis.net/gml/3.2}axisLabels")[
0
].text.split(" "):
self.axislabels.append(axis)
class RectifiedGrid(Grid):
""" RectifiedGrid class, extends Grid with additional offset vector information """
def __init__(self, rectifiedgrid):
super(RectifiedGrid, self).__init__(rectifiedgrid)
self.origin = rectifiedgrid.find(
"{http://www.opengis.net/gml/3.2}origin/{http://www.opengis.net/gml/3.2}Point/{http://www.opengis.net/gml/3.2}pos" # noqa
).text.split()
self.offsetvectors = []
for offset in rectifiedgrid.findall(
"{http://www.opengis.net/gml/3.2}offsetVector"
):
self.offsetvectors.append(offset.text.split())
class ReferenceableGridByVectors(Grid):
""" ReferenceableGridByVectors class, extends Grid with additional vector information """
def __init__(self, refereceablegridbyvectors):
super(ReferenceableGridByVectors, self).__init__(refereceablegridbyvectors)
self.origin = refereceablegridbyvectors.find(
"{http://www.opengis.net/gml/3.3/rgrid}origin/{http://www.opengis.net/gml/3.2}Point/{http://www.opengis.net/gml/3.2}pos" # noqa
).text.split()
self.offsetvectors = []
for offset in refereceablegridbyvectors.findall(
"{http://www.opengis.net/gml/3.3/rgrid}generalGridAxis/{http://www.opengis.net/gml/3.3/rgrid}GeneralGridAxis/{http://www.opengis.net/gml/3.3/rgrid}offsetVector" # noqa
):
self.offsetvectors.append(offset.text.split())
class AxisDescription(object):
""" Class to represent the AxisDescription element optionally found as part of the RangeSet and used to
define ordinates of additional dimensions such as wavelength bands or pressure levels"""
def __init__(self, axisdescElem):
self.name = self.label = None
self.values = []
        for elem in axisdescElem:  # getchildren() was removed from ElementTree in Python 3.9
if elem.tag == ns("name"):
self.name = elem.text
elif elem.tag == ns("label"):
self.label = elem.text
elif elem.tag == ns("values"):
                for child in elem:
self.values.append(child.text) | PypiClean |
/DDFacet-0.7.2.0.tar.gz/DDFacet-0.7.2.0/SkyModel/PSourceExtract/ClassFitIslands.py | from __future__ import division, absolute_import, print_function
import numpy as np
from SkyModel.Other.progressbar import ProgressBar
from SkyModel.PSourceExtract.ClassGaussFit import ClassGaussFit as ClassFit
class ClassFitIslands():
def __init__(self,IslandClass,NCPU=6):
self.Islands=IslandClass
self.NCPU=NCPU
def FitSerial(self,psf,incr,StdResidual):
PMin,PMaj,PPA=psf
Islands=self.Islands
ImOut=np.zeros(Islands.MaskImage.shape,np.float32)
pBAR = ProgressBar('white', block='=', empty=' ',Title="Fit islands")
sourceList=[]
for i in range(len(Islands.ListX)):
comment='Isl %i/%i' % (i+1,len(Islands.ListX))
pBAR.render(int(100* float(i+1) / len(Islands.ListX)), comment)
xin,yin,zin=np.array(Islands.ListX[i]),np.array(Islands.ListY[i]),np.array(Islands.ListS[i])
#xm=int(np.sum(xin*zin)/np.sum(zin))
#ym=int(np.sum(yin*zin)/np.sum(zin))
# Fit=ClassFit(xin,yin,zin,psf=(PMaj/incr,PMin/incr,PPA),noise=Islands.Noise[xm,ym])
Fit=ClassFit(xin,yin,zin,psf=(PMaj/incr,PMin/incr,PPA+np.pi/2),noise=StdResidual)#,FreePars=["l", "m","s"])
sourceList.append(Fit.DoAllFit())
Fit.PutFittedArray(ImOut)
Islands.FitIm=ImOut
return sourceList
def FitParallel(self,psf,incr,StdResidual):
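        # Parallel fit: each island's pixel coordinates and fluxes are pushed
        # onto a work queue, NCPU Worker processes fit Gaussians
        # independently, and the fitted parameters are collected from a
        # result queue while a progress bar tracks completed jobs.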
NCPU=self.NCPU
PMin,PMaj,PPA=psf
Islands=self.Islands
ImOut=np.zeros(Islands.MaskImage.shape,np.float32)
sourceList=[]
work_queue = multiprocessing.Queue()
result_queue = multiprocessing.Queue()
NJobs=len(Islands.ListX)
for iJob in range(NJobs):
work_queue.put([iJob,np.array(Islands.ListX[iJob]),np.array(Islands.ListY[iJob]),np.array(Islands.ListS[iJob])])
workerlist=[]
for ii in range(NCPU):
W=Worker(work_queue, result_queue,psf,incr,StdResidual)
workerlist.append(W)
workerlist[ii].start()
pBAR= ProgressBar('white', width=50, block='=', empty=' ',Title=" Init W ", HeaderSize=10,TitleSize=13)
pBAR.render(0, '%4i/%i' % (0,NJobs))
iResult=0
SourceList=[]
while iResult < NJobs:
DicoResult=result_queue.get()
if DicoResult["Success"]:
iResult+=1
NDone=iResult
intPercent=int(100* NDone / float(NJobs))
pBAR.render(intPercent, '%4i/%i' % (NDone,NJobs))
SourceList.append(DicoResult["FitPars"])
for ii in range(NCPU):
workerlist[ii].shutdown()
workerlist[ii].terminate()
workerlist[ii].join()
return SourceList
#======================================
import multiprocessing
class Worker(multiprocessing.Process):
def __init__(self,
work_queue,
result_queue,
psf,incr,StdResidual):
multiprocessing.Process.__init__(self)
self.work_queue = work_queue
self.result_queue = result_queue
self.kill_received = False
self.exit = multiprocessing.Event()
self.psf=psf
self.incr=incr
self.StdResidual=StdResidual
def shutdown(self):
self.exit.set()
def run(self):
while not self.kill_received:
try:
iIsland,xin,yin,zin = self.work_queue.get()
except:
break
PMin,PMaj,PPA=self.psf
incr=self.incr
StdResidual=self.StdResidual
Fit=ClassFit(xin,yin,zin,psf=(PMaj/incr,PMin/incr,PPA+np.pi/2),noise=StdResidual)
FitPars=Fit.DoAllFit()
#Fit.PutFittedArray(ImOut)
self.result_queue.put({"Success":True,"iIsland":iIsland,"FitPars":FitPars}) | PypiClean |
/FitBenchmarking-1.0.0.tar.gz/FitBenchmarking-1.0.0/docs/source/users/checkpointing.rst | .. _checkpointing:
################################
Checkpointing in FitBenchmarking
################################
In some cases, fitting can take a long time and rerunning the fits to change
output options is inefficient. For this situation we provide a checkpointing
file.
Using the checkpointing feature
===============================
As indicated above, this feature currently only supports re-rendering the
presentation of runs, although we plan to extend it to combining and
filtering runs in the future.
By default, when running FitBenchmarking it will create a ``checkpoint`` file in
the results directory which will contain all the information required to create
output tables and plots.
To generate new reports for an existing checkpoint, use the
``--load_checkpoint`` option:
.. code-block:: bash
fitbenchmarking --load_checkpoint
This can use the same options file as the original run with the output
options changed, or a separate one, as long as the results directory and
checkpoint file are the same.
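For example, to re-render results from an existing checkpoint using a
different options file (the ``-o`` options-file flag below is an assumption;
check ``fitbenchmarking --help`` for the exact name):
.. code-block:: bash
    fitbenchmarking --load_checkpoint -o my_output_options.ini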
There is also a separate tool for working with checkpoint files,
``fitbenchmarking-cp``, which can be used to regenerate the reports.
.. code-block:: bash
fitbenchmarking-cp report --help
Warnings
========
Using ``--load_checkpoint`` will not re-run any results or run any new
combinations that have been added to the options file.
This command also does not check that the checkpoint file has not been edited
manually. Manual editing may be desired for removing or combining data while
these features are developed but should be done with caution to ensure results
are still comparable.
| PypiClean |
/BOX_KEEP_UPS-1.tar.gz/BOX_KEEP_UPS-1/README.md | # BOX_KEEP_UPS
"Box Keep Ups" is a simple offline game created using Kivy. In this game, the player needs to move the box to ensure
that the ball bounces and does not touch the ground.
# Executable File
The executable file is downloadable at
https://github.com/NativeApkDev/BOX_KEEP_UPS/blob/master/BOX_KEEP_UPS/dist/BoxKeepUps/BoxKeepUps.
# Source Code
Python code used to create the game is available in the files in
https://github.com/NativeApkDev/BOX_KEEP_UPS/tree/master/BOX_KEEP_UPS.
# Installation
Enter the command "pip install BOX_KEEP_UPS".
# How to Use the Executable File?
First, open the game by double-clicking the file "BoxKeepUps".
The image below shows what the executable file looks like (the file is enclosed in a red rectangle).

# About The Game
In the game, your goal is to get the ball to bounce off the box as many times as possible without letting it hit the ground.
The box can be moved by clicking on the screen in the direction you want it to move. The box can only
be moved horizontally. The following image shows what the game interface looks like.
 | PypiClean |
/Denis_mess_client-1.0.0.tar.gz/Denis_mess_client-1.0.0/client/client/database.py | import datetime
import os
from sqlalchemy import create_engine, Table, Column, Integer, String, Text, \
MetaData, DateTime
from sqlalchemy.orm import mapper, sessionmaker
from sqlalchemy.sql import default_comparator
from common.variables import *
class ClientDatabase:
"""
    Wrapper class for working with the client database.
    Uses a SQLite database; implemented with SQLAlchemy ORM
    using the classical mapping approach.
"""
class KnownUsers:
"""
        Mapping class for the table of all known users.
"""
def __init__(self, user):
self.id = None
self.username = user
class MessageStat:
"""
        Mapping class for the sent/received message statistics table.
"""
def __init__(self, contact, direction, message):
self.id = None
self.contact = contact
self.direction = direction
self.message = message
self.date = datetime.datetime.now()
class Contacts:
"""
Класс - отображение для таблицы контактов.
"""
def __init__(self, contact):
self.id = None
self.name = contact
    # Class constructor:
def __init__(self, name):
        # Create the database engine. Since several clients are allowed to
        # run at the same time, each must have its own database.
        # Because the client is multithreaded, the check for connections
        # from different threads has to be disabled,
        # otherwise sqlite3.ProgrammingError is raised
filename = f'client_{name}.db3'
self.database_engine = create_engine(
f'sqlite:///{os.path.join(CLIENT_DATABASES_PATH, filename)}',
echo=False,
pool_recycle=7200,
connect_args={
'check_same_thread': False})
        # Create the MetaData object
self.metadata = MetaData()
        # Create the known users table
users = Table('known_users', self.metadata,
Column('id', Integer, primary_key=True),
Column('username', String)
)
        # Create the message history table
history = Table('message_history', self.metadata,
Column('id', Integer, primary_key=True),
Column('contact', String),
Column('direction', String),
Column('message', Text),
Column('date', DateTime)
)
        # Create the contacts table
contacts = Table('contacts', self.metadata,
Column('id', Integer, primary_key=True),
Column('name', String, unique=True)
)
        # Create the tables
self.metadata.create_all(self.database_engine)
        # Create the mappings
mapper(self.KnownUsers, users)
mapper(self.MessageStat, history)
mapper(self.Contacts, contacts)
        # Create the session
Session = sessionmaker(bind=self.database_engine)
self.session = Session()
        # The contacts table has to be cleared, because on startup the
        # contacts are loaded from the server.
self.session.query(self.Contacts).delete()
self.session.commit()
    def add_contact(self, contact):
        """Add a contact to the database."""
if not self.session.query(
self.Contacts).filter_by(
name=contact).count():
contact_row = self.Contacts(contact)
self.session.add(contact_row)
self.session.commit()
    def contacts_clear(self):
        """Clear the contact list table."""
self.session.query(self.Contacts).delete()
    def del_contact(self, contact):
        """Delete a specific contact."""
self.session.query(self.Contacts).filter_by(name=contact).delete()
    def add_users(self, users_list):
        """Fill the known users table."""
self.session.query(self.KnownUsers).delete()
for user in users_list:
user_row = self.KnownUsers(user)
self.session.add(user_row)
self.session.commit()
    def save_message(self, contact, direction, message):
        """Save a message to the database."""
message_row = self.MessageStat(contact, direction, message)
self.session.add(message_row)
self.session.commit()
    def get_contacts(self):
        """Return a list of all contacts."""
return [contact[0]
for contact in self.session.query(self.Contacts.name).all()]
    def get_users(self):
        """Return a list of all known users."""
return [user[0]
for user in self.session.query(self.KnownUsers.username).all()]
    def check_user(self, user):
        """Check whether a user exists."""
if self.session.query(
self.KnownUsers).filter_by(
username=user).count():
return True
else:
return False
    def check_contact(self, contact):
        """Check whether a contact exists."""
if self.session.query(self.Contacts).filter_by(name=contact).count():
return True
else:
return False
def get_history(self, contact):
"""
        Return the message history with a specific user.
"""
query = self.session.query(
self.MessageStat).filter_by(
contact=contact)
return [(history_row.contact,
history_row.direction,
history_row.message,
history_row.date) for history_row in query.all()]
# debugging
if __name__ == '__main__':
test_db = ClientDatabase('test1')
# for i in ['test3', 'test4', 'test5']:
# test_db.add_contact(i)
# test_db.add_contact('test4')
# test_db.add_users(['test1', 'test2', 'test3', 'test4', 'test5'])
# test_db.save_message(
    #     'test2', 'in', f'Hi! I am a test message '
    #     f'from {datetime.datetime.now()}!')
    # test_db.save_message(
    #     'test2', 'out', f'Hi! I am another test message '
    #     f'from {datetime.datetime.now()}!')
# print(test_db.get_contacts())
# print(test_db.get_users())
# print(test_db.check_user('test1'))
# print(test_db.check_user('test10'))
print(sorted(test_db.get_history('test2'), key=lambda item: item[3]))
# test_db.del_contact('test4')
# print(test_db.get_contacts()) | PypiClean |
/FinMesh-2.3-py3-none-any.whl/FinMesh-2.3.dist-info/LICENSE.md | # License Overview:
## This software is available for use under the Apache 2.0 License, subject to the Commons Clause Condition.
Essentially this means that the software is 'source-available', free to nearly all users and use-cases. The one thing that is restricted through the Commons Clause is the selling of the software where the code has not been substantially changed. If you are not 'adding-value' to this code before selling it, you will have to negotiate a commercial license with the licensor.
Up until June 5th, 2021, this software was licensed under a no-conditions MIT license. Individuals using this software have the same permissions under this new license as they did with the MIT. The only difference, again, is that the software cannot be sold in a substantially unchaged state without a negotiated license.
# “Commons Clause” License Condition
## Version 1.0
https://commonsclause.com/
The Software is provided to you by the Licensor under the License, as defined below, subject to the following condition.
Without limiting other conditions in the License, the grant of rights under the License will not include, and the License does not grant to you, the right to Sell the Software.
For purposes of the foregoing, “Sell” means practicing any or all of the rights granted to you under the License to provide to third parties, for a fee or other consideration (including without limitation fees for hosting or consulting/ support services related to the Software), a product or service whose value derives, entirely or substantially, from the functionality of the Software. Any license notice or attribution required by the License must also include this Commons Clause License Condition notice.
# Apache License
## Version 2.0, January 2004
http://www.apache.org/licenses/
#### TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
#### END OF TERMS AND CONDITIONS
Copyright 2021 Michael Hartmann
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| PypiClean |
/0x_order_utils-4.0.1-py3-none-any.whl/zero_ex/order_utils/asset_data_utils.py |
from typing import NamedTuple
import eth_abi
from deprecated.sphinx import deprecated
from zero_ex.dev_utils import abi_utils
from zero_ex.dev_utils.type_assertions import assert_is_string, assert_is_int
ERC20_ASSET_DATA_BYTE_LENGTH = 36
ERC721_ASSET_DATA_MINIMUM_BYTE_LENGTH = 53
SELECTOR_LENGTH = 10
class ERC20AssetData(NamedTuple):
"""Object interface to ERC20 asset data."""
asset_proxy_id: str
"""Asset proxy identifier."""
token_address: str
"""Token address"""
class ERC721AssetData(NamedTuple):
"""Object interface to ERC721 asset data."""
asset_proxy_id: str
"""Asset proxy identifier."""
token_address: str
"""Token address"""
token_id: int
"""Token identifier."""
@deprecated(reason='use `"0x"+encode_erc20().hex()` instead', version="4.0.0")
def encode_erc20_asset_data(token_address: str) -> str:
"""Encode an ERC20 token address into an asset data string.
:param token_address: the ERC20 token's contract address.
:returns: hex encoded asset data string, usable in the makerAssetData or
takerAssetData fields in a 0x order.
>>> encode_erc20_asset_data('0x1dc4c1cefef38a777b15aa20260a54e584b16c48')
'0xf47261b00000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48'
"""
assert_is_string(token_address, "token_address")
return (
"0x"
+ abi_utils.simple_encode("ERC20Token(address)", token_address).hex()
)
def encode_erc20(token_address: str) -> bytes:
"""Encode an ERC20 token address into asset data bytes.
:param token_address: the ERC20 token's contract address.
    :returns: asset data bytes; hex-encode them for use in the makerAssetData
        or takerAssetData fields in a 0x order.
>>> encode_erc20('0x1dc4c1cefef38a777b15aa20260a54e584b16c48').hex()
'f47261b00000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48'
"""
assert_is_string(token_address, "token_address")
return abi_utils.simple_encode("ERC20Token(address)", token_address)
def decode_erc20_asset_data(asset_data: str) -> ERC20AssetData:
"""Decode an ERC20 asset data hex string.
:param asset_data: String produced by prior call to encode_erc20_asset_data()
>>> decode_erc20_asset_data("0xf47261b00000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c48")
ERC20AssetData(asset_proxy_id='0xf47261b0', token_address='0x1dc4c1cefef38a777b15aa20260a54e584b16c48')
""" # noqa: E501 (line too long)
assert_is_string(asset_data, "asset_data")
if len(asset_data) < ERC20_ASSET_DATA_BYTE_LENGTH:
raise ValueError(
"Could not decode ERC20 Proxy Data. Expected length of encoded"
+ f" data to be at least {str(ERC20_ASSET_DATA_BYTE_LENGTH)}."
+ f" Got {str(len(asset_data))}."
)
asset_proxy_id: str = asset_data[0:SELECTOR_LENGTH]
if asset_proxy_id != abi_utils.method_id("ERC20Token", ["address"]):
raise ValueError(
"Could not decode ERC20 Proxy Data. Expected Asset Proxy Id to be"
+ f" ERC20 ({abi_utils.method_id('ERC20Token', ['address'])})"
+ f" but got {asset_proxy_id}."
)
# workaround for https://github.com/PyCQA/pylint/issues/1498
# pylint: disable=unsubscriptable-object
token_address = eth_abi.decode_abi(
["address"], bytes.fromhex(asset_data[SELECTOR_LENGTH:])
)[0]
return ERC20AssetData(
asset_proxy_id=asset_proxy_id, token_address=token_address
)
@deprecated(reason='use `"0x"+encode_erc721().hex()` instead', version="4.0.0")
def encode_erc721_asset_data(token_address: str, token_id: int) -> str:
"""Encode an ERC721 asset data hex string.
:param token_address: the ERC721 token's contract address.
:param token_id: the identifier of the asset's instance of the token.
:returns: hex encoded asset data string, usable in the makerAssetData or
takerAssetData fields in a 0x order.
>>> encode_erc721_asset_data('0x1dc4c1cefef38a777b15aa20260a54e584b16c48', 1)
'0x025717920000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c480000000000000000000000000000000000000000000000000000000000000001'
""" # noqa: E501 (line too long)
assert_is_string(token_address, "token_address")
assert_is_int(token_id, "token_id")
return (
"0x"
+ abi_utils.simple_encode(
"ERC721Token(address,uint256)", token_address, token_id
).hex()
)
def encode_erc721(token_address: str, token_id: int) -> bytes:
"""Encode an ERC721 token address into asset data bytes.
:param token_address: the ERC721 token's contract address.
:param token_id: the identifier of the asset's instance of the token.
    :returns: asset data bytes; hex-encode them for use in the makerAssetData
        or takerAssetData fields in a 0x order.
>>> encode_erc721('0x1dc4c1cefef38a777b15aa20260a54e584b16c48', 1).hex()
'025717920000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c480000000000000000000000000000000000000000000000000000000000000001'
""" # noqa: E501 (line too long)
assert_is_string(token_address, "token_address")
assert_is_int(token_id, "token_id")
return abi_utils.simple_encode(
"ERC721Token(address,uint256)", token_address, token_id
)
def decode_erc721_asset_data(asset_data: str) -> ERC721AssetData:
"""Decode an ERC721 asset data hex string.
>>> decode_erc721_asset_data('0x025717920000000000000000000000001dc4c1cefef38a777b15aa20260a54e584b16c480000000000000000000000000000000000000000000000000000000000000001')
ERC721AssetData(asset_proxy_id='0x02571792', token_address='0x1dc4c1cefef38a777b15aa20260a54e584b16c48', token_id=1)
""" # noqa: E501 (line too long)
assert_is_string(asset_data, "asset_data")
if len(asset_data) < ERC721_ASSET_DATA_MINIMUM_BYTE_LENGTH:
raise ValueError(
"Could not decode ERC721 Asset Data. Expected length of encoded"
+ f"data to be at least {ERC721_ASSET_DATA_MINIMUM_BYTE_LENGTH}. "
+ f"Got {len(asset_data)}."
)
asset_proxy_id: str = asset_data[0:SELECTOR_LENGTH]
if asset_proxy_id != abi_utils.method_id(
"ERC721Token", ["address", "uint256"]
):
raise ValueError(
"Could not decode ERC721 Asset Data. Expected Asset Proxy Id to be"
+ " ERC721 ("
+ f"{abi_utils.method_id('ERC721Token', ['address', 'uint256'])}"
+ f"), but got {asset_proxy_id}"
)
(token_address, token_id) = eth_abi.decode_abi(
["address", "uint256"], bytes.fromhex(asset_data[SELECTOR_LENGTH:])
)
return ERC721AssetData(
asset_proxy_id=asset_proxy_id,
token_address=token_address,
token_id=token_id,
) | PypiClean |
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/node_modules/jquery/src/css/support.js | define( [
"../core",
"../var/document",
"../var/documentElement",
"../var/support"
], function( jQuery, document, documentElement, support ) {
"use strict";
( function() {
// Executing both pixelPosition & boxSizingReliable tests require only one layout
// so they're executed at the same time to save the second computation.
function computeStyleTests() {
// This is a singleton, we need to execute it only once
if ( !div ) {
return;
}
container.style.cssText = "position:absolute;left:-11111px;width:60px;" +
"margin-top:1px;padding:0;border:0";
div.style.cssText =
"position:relative;display:block;box-sizing:border-box;overflow:scroll;" +
"margin:auto;border:1px;padding:1px;" +
"width:60%;top:1%";
documentElement.appendChild( container ).appendChild( div );
var divStyle = window.getComputedStyle( div );
pixelPositionVal = divStyle.top !== "1%";
// Support: Android 4.0 - 4.3 only, Firefox <=3 - 44
reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12;
// Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3
// Some styles come back with percentage values, even though they shouldn't
div.style.right = "60%";
pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36;
// Support: IE 9 - 11 only
// Detect misreporting of content dimensions for box-sizing:border-box elements
boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36;
// Support: IE 9 only
// Detect overflow:scroll screwiness (gh-3699)
// Support: Chrome <=64
// Don't get tricked when zoom affects offsetWidth (gh-4029)
div.style.position = "absolute";
scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12;
documentElement.removeChild( container );
// Nullify the div so it wouldn't be stored in the memory and
// it will also be a sign that checks already performed
div = null;
}
function roundPixelMeasures( measure ) {
return Math.round( parseFloat( measure ) );
}
var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal,
reliableTrDimensionsVal, reliableMarginLeftVal,
container = document.createElement( "div" ),
div = document.createElement( "div" );
// Finish early in limited (non-browser) environments
if ( !div.style ) {
return;
}
// Support: IE <=9 - 11 only
// Style of cloned element affects source element cloned (#8908)
div.style.backgroundClip = "content-box";
div.cloneNode( true ).style.backgroundClip = "";
support.clearCloneStyle = div.style.backgroundClip === "content-box";
jQuery.extend( support, {
boxSizingReliable: function() {
computeStyleTests();
return boxSizingReliableVal;
},
pixelBoxStyles: function() {
computeStyleTests();
return pixelBoxStylesVal;
},
pixelPosition: function() {
computeStyleTests();
return pixelPositionVal;
},
reliableMarginLeft: function() {
computeStyleTests();
return reliableMarginLeftVal;
},
scrollboxSize: function() {
computeStyleTests();
return scrollboxSizeVal;
},
// Support: IE 9 - 11+, Edge 15 - 18+
// IE/Edge misreport `getComputedStyle` of table rows with width/height
// set in CSS while `offset*` properties report correct values.
// Behavior in IE 9 is more subtle than in newer versions & it passes
// some versions of this test; make sure not to make it pass there!
//
// Support: Firefox 70+
// Only Firefox includes border widths
// in computed dimensions. (gh-4529)
reliableTrDimensions: function() {
var table, tr, trChild, trStyle;
if ( reliableTrDimensionsVal == null ) {
table = document.createElement( "table" );
tr = document.createElement( "tr" );
trChild = document.createElement( "div" );
table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate";
tr.style.cssText = "border:1px solid";
// Support: Chrome 86+
// Height set through cssText does not get applied.
// Computed height then comes back as 0.
tr.style.height = "1px";
trChild.style.height = "9px";
// Support: Android 8 Chrome 86+
// In our bodyBackground.html iframe,
// display for all div elements is set to "inline",
// which causes a problem only in Android 8 Chrome 86.
// Ensuring the div is display: block
// gets around this issue.
trChild.style.display = "block";
documentElement
.appendChild( table )
.appendChild( tr )
.appendChild( trChild );
trStyle = window.getComputedStyle( tr );
reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) +
parseInt( trStyle.borderTopWidth, 10 ) +
parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight;
documentElement.removeChild( table );
}
return reliableTrDimensionsVal;
}
} );
} )();
return support;
} ); | PypiClean |
/CloudReg-1.0.1-py3-none-any.whl/cloudreg/scripts/registration_accuracy.py | from .util import aws_cli
import os
import subprocess
import shlex
import requests as r
import numpy as np
import h5py
from cloudvolume import CloudVolume
from collections import defaultdict
import uuid
import argparse
from scipy.io import loadmat
import json
def loadmat_v73(mat_path):
arrays = {}
f = h5py.File(mat_path, "r")
for k, v in f.items():
arrays[k] = np.array(v)
return arrays
class NGLink:
def __init__(self, json_link):
self.points = defaultdict(lambda: "")
self.json_link = json_link
self._set_json_from_link()
def get_annotations(self, points):
annotations = []
for i, j in points.items():
x = {
"point": j.tolist(),
"type": "point",
"id": f"{uuid.uuid1().hex}",
"description": i,
}
annotations.append(x)
return annotations
def get_points_in(self, coordinate_system):
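        # Return annotation points either as stored ("voxel") or, for any
        # other value, scaled by the layer's voxel size into physical units
        # (micrometres).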
if coordinate_system == "voxel":
return self.points
else:
return {i[0]: (i[1] * self.points_voxel_size) for i in self.points.items()}
def _set_json_from_link(self):
self._json = r.get(self.json_link).json()
self._parse_voxel_size()
self.layers = [self._parse_layer(i) for i in self._json["layers"]]
def _parse_layer(self, layer_data):
if layer_data["type"] == "image":
return self._parse_image_layer(layer_data)
elif layer_data["type"] == "annotation":
return self._parse_annotation_layer(layer_data)
else:
return
def _parse_annotation_layer(self, layer_data):
# points in physical units
for i in layer_data["annotations"]:
if i["type"] != "point":
continue
if "description" in i.keys():
self.points[i["description"].strip()] = i["point"]
else:
self.points[f"{i['id']}"] = i["point"]
return layer_data
def _parse_image_layer(self, layer_data):
vol = CloudVolume(layer_data["source"]["url"].split("precomputed://")[-1])
self.image_shape = np.array(vol.scales[0]["size"])
# converting from nm to um
self.image_voxel_size = np.array(vol.scales[0]["resolution"]) / 1e3
self.voxel_origin = self.image_shape / 2
self.physical_origin = self.voxel_origin * self.image_voxel_size
return layer_data
def _parse_voxel_size(self):
dims = self._json["dimensions"]
x_size_m, y_size_m, z_size_m = dims["x"][0], dims["y"][0], dims["z"][0]
# converting from m to um
self.points_voxel_size = np.array([x_size_m, y_size_m, z_size_m]) * 1e6
class Fiducial:
def __init__(self, point, orientation, image_shape, voxel_size, description=""):
"""
        point: 3D point in physical space of the fiducial (array-like, len 3)
        image_shape: number of voxels of the native-resolution image in each dim (array-like, len 3)
        voxel_size: physical size of a voxel in each dim (array-like, len 3)
"""
self.image_shape = np.asarray(image_shape)
self.voxel_size = np.asarray(voxel_size)
self._set_origin()
self.point = np.asarray(point) - self.origin
self.description = description
self.orientation = orientation
self.ng_point = np.asarray(point)
def _set_origin(self):
self.origin = (self.image_shape - 1) * self.voxel_size / 2
def reorient_point(self, out_orient):
dimension = len(self.point)
in_orient = str(self.orientation).lower()
out_orient = str(out_orient).lower()
inDirection = ""
outDirection = ""
orientToDirection = {"r": "r", "l": "r", "s": "s", "i": "s", "a": "a", "p": "a"}
for i in range(dimension):
try:
inDirection += orientToDirection[in_orient[i]]
except BaseException:
raise Exception("in_orient '{0}' is invalid.".format(in_orient))
try:
outDirection += orientToDirection[out_orient[i]]
except BaseException:
raise Exception("out_orient '{0}' is invalid.".format(out_orient))
if len(set(inDirection)) != dimension:
raise Exception("in_orient '{0}' is invalid.".format(in_orient))
if len(set(outDirection)) != dimension:
raise Exception("out_orient '{0}' is invalid.".format(out_orient))
order = []
flip = []
for i in range(dimension):
j = inDirection.find(outDirection[i])
order += [j]
flip += [in_orient[j] != out_orient[i]]
new_point = self._flip_point(self.point, axis=flip)
new_point = new_point[order]
# update self
self.point = new_point
self.orientation = out_orient
return new_point
def _reorient_point(self, out_orient):
dimension = len(self.point)
in_orient = str(self.orientation).lower()
out_orient = str(out_orient).lower()
inDirection = ""
outDirection = ""
orientToDirection = {"r": "r", "l": "r", "s": "s", "i": "s", "a": "a", "p": "a"}
for i in range(dimension):
try:
inDirection += orientToDirection[in_orient[i]]
except BaseException:
raise Exception("in_orient '{0}' is invalid.".format(in_orient))
try:
outDirection += orientToDirection[out_orient[i]]
except BaseException:
raise Exception("out_orient '{0}' is invalid.".format(out_orient))
if len(set(inDirection)) != dimension:
raise Exception("in_orient '{0}' is invalid.".format(in_orient))
if len(set(outDirection)) != dimension:
raise Exception("out_orient '{0}' is invalid.".format(out_orient))
order = []
flip = []
for i in range(dimension):
j = inDirection.find(outDirection[i])
order += [j]
flip += [in_orient[j] != out_orient[i]]
new_point = self._flip_point(self.point, axis=flip)
new_point = new_point[order]
# update self
self.orientation = out_orient
self.point = new_point
return new_point
def _flip_point(self, point, axis=0):
tmp_point = point.copy()
tmp_point[axis] = -point[axis]
return tmp_point
def __str__(self):
return f"{self.description}: [{self.point[0]}, {self.point[1]}, {self.point[2]} ]\norientation: {self.orientation}"
def get_distances(points1, points2):
distances = {}
for i in points1.keys():
try:
distances[i] = np.linalg.norm(points1[i] - points2[i])
except KeyError:
continue
# distances[i] = np.linalg.norm(points1[i] - points2[i.lower()])
return distances
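# --- Illustrative sketch (added example, not part of the original module) ---
# Hypothetical example of get_distances(): keys present in both dicts yield
# Euclidean distances, keys missing from the second dict are silently skipped.
# The fiducial names and coordinates are made-up values.
def _example_get_distances():
    a = {"bregma": np.array([0.0, 0.0, 0.0]), "lambda": np.array([1.0, 0.0, 0.0])}
    b = {"bregma": np.array([3.0, 4.0, 0.0])}
    return get_distances(a, b)  # expected: {"bregma": 5.0}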
def compute_registration_accuracy(
target_viz_link,
atlas_viz_link,
affine_path,
velocity_path,
# voxel size of velocity field
velocity_field_vsize,
atlas_orientation="PIR",
target_orientation="LPS",
):
# get json link from viz link
target_viz = NGLink(target_viz_link.split("json_url=")[-1])
atlas_viz = NGLink(atlas_viz_link.split("json_url=")[-1])
# get origin-centered fiducials from viz link
atlas_fiducials = [
Fiducial(
j,
atlas_orientation,
atlas_viz.image_shape,
atlas_viz.image_voxel_size,
description=i,
)
for i, j in atlas_viz.get_points_in("physical").items()
]
target_fiducials = [
Fiducial(
j,
target_orientation,
target_viz.image_shape,
target_viz.image_voxel_size,
description=i,
)
for i, j in target_viz.get_points_in("physical").items()
]
# run matlab command to get transformed fiducials
if affine_path != "" and velocity_path != "":
points = [i.point for i in target_fiducials]
points_string = [", ".join(map(str, i)) for i in points]
points_string = "; ".join(points_string)
# velocity field voxel size
v_size = ", ".join(str(i) for i in velocity_field_vsize)
# get current file path and set path to transform_points
# base_path = pathlib.Path(__file__).parent.parent.absolute() / 'registration'
base_path = os.path.expanduser("~/CloudReg/registration")
transformed_points_path = "./transformed_points.mat"
matlab_path = 'matlab'
matlab_command = f"""
{matlab_path} -nodisplay -nosplash -nodesktop -r \"addpath(\'{base_path}\');Aname=\'{affine_path}\';vname=\'{velocity_path}\';v_size=[{v_size}];points=[{points_string}];points_t = transform_points(points,Aname,vname,v_size,\'atlas\');save(\'./transformed_points.mat\',\'points_t\');exit;\"
"""
print(matlab_command)
subprocess.run(shlex.split(matlab_command),)
# transformed_points.mat created now
points_t = loadmat(transformed_points_path)["points_t"]
points = {i.description: j for i, j in zip(target_fiducials, points_t)}
points_ng = {i.description: np.array(j) + k.origin for i, j, k in zip(target_fiducials, points_t, atlas_fiducials)}
points_ng_json = target_viz.get_annotations(points_ng)
with open('./transformed_points.json', 'w') as fp:
json.dump(points_ng_json,fp)
else:
points = {i.description: i.point for i in target_fiducials}
# points_ng = [np.array(j) + k.origin for j, k in zip(,rgeti atlas_fiducials)]
atlas_points = {i.description: i.point for i in atlas_fiducials}
distances = get_distances(atlas_points, points)
[print(i, j) for i, j in distances.items()]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"Compute registration accuracy given 2 sets of fiducials from target to atlas"
)
parser.add_argument(
"-target_viz_link", help="viz link to target with fiducials labelled.", type=str
)
parser.add_argument(
"-atlas_viz_link", help="viz link to atlas with fiducials labelled", type=str
)
parser.add_argument(
"--affine_path",
help="S3 path or local path to matlab transformation files. These will be downloaded to compute the fiducial accuracy",
type=str,
default="",
)
parser.add_argument(
"--velocity_path",
help="S3 path ot local matlab transformation files. These will be downloaded to compute the fiducial accuracy",
type=str,
default="",
)
parser.add_argument(
"--velocity_voxel_size",
help="Voxel size of velocity field in microns",
nargs="+",
type=float,
default=[50.0] * 3,
)
parser.add_argument(
"--atlas_orientation",
help="3-letter orientation of the atlas data. Default is PIR for Allen Reference Atlas.",
type=str,
default="PIR",
)
parser.add_argument(
"--target_orientation",
help="3-letter orientation of the target data. Default is LPS.",
type=str,
default="LPS",
)
# parser.add_argument('-ssh_key_path', help='path to identity file used to ssh into given instance')
# parser.add_argument('-instance_id', help='EC2 Instance ID of instance to run COLM pipeline on.')
# parser.add_argument('--instance_type', help='EC2 instance type to run pipeline on. minimum r5d.16xlarge', type=str, default='r5d.16xlarge')
args = parser.parse_args()
if args.affine_path.startswith("s3://"):
# download affine mat to local storage
aws_cli(shlex.split(f"s3 cp {args.affine_path} ./A.mat"))
args.affine_path = "./A.mat"
if args.velocity_path.startswith("s3://"):
# download velocity mat to local storage
aws_cli(shlex.split(f"s3 cp {args.velocity_path} ./v.mat"))
args.velocity_path = "./v.mat"
compute_registration_accuracy(
args.target_viz_link,
args.atlas_viz_link,
args.affine_path,
args.velocity_path,
args.velocity_voxel_size,
args.atlas_orientation,
args.target_orientation,
)
// /Bluebook-0.0.1.tar.gz/Bluebook-0.0.1/pylot/component/static/pylot/vendor/mdeditor/bower_components/codemirror/mode/sass/sass.js
CodeMirror.defineMode("sass", function(config) {
var tokenRegexp = function(words){
return new RegExp("^" + words.join("|"));
};
var keywords = ["true", "false", "null", "auto"];
var keywordsRegexp = new RegExp("^" + keywords.join("|"));
var operators = ["\\(", "\\)", "=", ">", "<", "==", ">=", "<=", "\\+", "-", "\\!=", "/", "\\*", "%", "and", "or", "not"];
var opRegexp = tokenRegexp(operators);
var pseudoElementsRegexp = /^::?[\w\-]+/;
var urlTokens = function(stream, state){
var ch = stream.peek();
if (ch === ")"){
stream.next();
state.tokenizer = tokenBase;
return "operator";
}else if (ch === "("){
stream.next();
stream.eatSpace();
return "operator";
}else if (ch === "'" || ch === '"'){
state.tokenizer = buildStringTokenizer(stream.next());
return "string";
}else{
state.tokenizer = buildStringTokenizer(")", false);
return "string";
}
};
var multilineComment = function(stream, state) {
if (stream.skipTo("*/")){
stream.next();
stream.next();
state.tokenizer = tokenBase;
}else {
stream.next();
}
return "comment";
};
var buildStringTokenizer = function(quote, greedy){
if(greedy == null){ greedy = true; }
function stringTokenizer(stream, state){
var nextChar = stream.next();
var peekChar = stream.peek();
var previousChar = stream.string.charAt(stream.pos-2);
var endingString = ((nextChar !== "\\" && peekChar === quote) || (nextChar === quote && previousChar !== "\\"));
/*
console.log("previousChar: " + previousChar);
console.log("nextChar: " + nextChar);
console.log("peekChar: " + peekChar);
console.log("ending: " + endingString);
*/
if (endingString){
if (nextChar !== quote && greedy) { stream.next(); }
state.tokenizer = tokenBase;
return "string";
}else if (nextChar === "#" && peekChar === "{"){
state.tokenizer = buildInterpolationTokenizer(stringTokenizer);
stream.next();
return "operator";
}else {
return "string";
}
}
return stringTokenizer;
};
var buildInterpolationTokenizer = function(currentTokenizer){
return function(stream, state){
if (stream.peek() === "}"){
stream.next();
state.tokenizer = currentTokenizer;
return "operator";
}else{
return tokenBase(stream, state);
}
};
};
var indent = function(state){
if (state.indentCount == 0){
state.indentCount++;
var lastScopeOffset = state.scopes[0].offset;
var currentOffset = lastScopeOffset + config.indentUnit;
state.scopes.unshift({ offset:currentOffset });
}
};
var dedent = function(state){
if (state.scopes.length == 1) { return; }
state.scopes.shift();
};
var tokenBase = function(stream, state) {
var ch = stream.peek();
// Single line Comment
if (stream.match('//')) {
stream.skipToEnd();
return "comment";
}
// Multiline Comment
if (stream.match('/*')){
state.tokenizer = multilineComment;
return state.tokenizer(stream, state);
}
// Interpolation
if (stream.match('#{')){
state.tokenizer = buildInterpolationTokenizer(tokenBase);
return "operator";
}
if (ch === "."){
stream.next();
// Match class selectors
if (stream.match(/^[\w-]+/)){
indent(state);
return "atom";
}else if (stream.peek() === "#"){
indent(state);
return "atom";
}else{
return "operator";
}
}
if (ch === "#"){
stream.next();
// Hex numbers
if (stream.match(/[0-9a-fA-F]{6}|[0-9a-fA-F]{3}/)){
return "number";
}
// ID selectors
if (stream.match(/^[\w-]+/)){
indent(state);
return "atom";
}
if (stream.peek() === "#"){
indent(state);
return "atom";
}
}
// Numbers
if (stream.match(/^-?[0-9\.]+/)){
return "number";
}
// Units
if (stream.match(/^(px|em|in)\b/)){
return "unit";
}
if (stream.match(keywordsRegexp)){
return "keyword";
}
if (stream.match(/^url/) && stream.peek() === "("){
state.tokenizer = urlTokens;
return "atom";
}
// Variables
if (ch === "$"){
stream.next();
stream.eatWhile(/[\w-]/);
if (stream.peek() === ":"){
stream.next();
return "variable-2";
}else{
return "variable-3";
}
}
if (ch === "!"){
stream.next();
if (stream.match(/^[\w]+/)){
return "keyword";
}
return "operator";
}
if (ch === "="){
stream.next();
// Match shortcut mixin definition
if (stream.match(/^[\w-]+/)){
indent(state);
return "meta";
}else {
return "operator";
}
}
if (ch === "+"){
stream.next();
// Match shortcut mixin definition
if (stream.match(/^[\w-]+/)){
return "variable-3";
}else {
return "operator";
}
}
// Indent Directives
if (stream.match(/^@(else if|if|media|else|for|each|while|mixin|function)/)){
indent(state);
return "meta";
}
// Other Directives
if (ch === "@"){
stream.next();
stream.eatWhile(/[\w-]/);
return "meta";
}
// Strings
if (ch === '"' || ch === "'"){
stream.next();
state.tokenizer = buildStringTokenizer(ch);
return "string";
}
// Pseudo element selectors
if (ch == ':' && stream.match(pseudoElementsRegexp)){
return "keyword";
}
// atoms
if (stream.eatWhile(/[\w-&]/)){
// matches a property definition
if (stream.peek() === ":" && !stream.match(pseudoElementsRegexp, false))
return "property";
else
return "atom";
}
if (stream.match(opRegexp)){
return "operator";
}
// If we haven't returned by now, we move 1 character
// and return an error
stream.next();
return null;
};
var tokenLexer = function(stream, state) {
if (stream.sol()){
state.indentCount = 0;
}
var style = state.tokenizer(stream, state);
var current = stream.current();
if (current === "@return"){
dedent(state);
}
if (style === "atom"){
indent(state);
}
if (style !== null){
var startOfToken = stream.pos - current.length;
var withCurrentIndent = startOfToken + (config.indentUnit * state.indentCount);
var newScopes = [];
for (var i = 0; i < state.scopes.length; i++){
var scope = state.scopes[i];
if (scope.offset <= withCurrentIndent){
newScopes.push(scope);
}
}
state.scopes = newScopes;
}
return style;
};
return {
startState: function() {
return {
tokenizer: tokenBase,
scopes: [{offset: 0, type: 'sass'}],
definedVars: [],
definedMixins: []
};
},
token: function(stream, state) {
var style = tokenLexer(stream, state);
state.lastToken = { style: style, content: stream.current() };
return style;
},
indent: function(state) {
return state.scopes[0].offset;
}
};
});
CodeMirror.defineMIME("text/x-sass", "sass");
# /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/proxywriter.py
import new
import weakref
import operator
from amara.namespaces import EXTENSION_NAMESPACE
from amara.lib.xmlstring import isspace
from amara.writers import streamwriter, textwriter, htmlwriter, xmlwriter
from amara.xslt import XsltError
_TEXT_METHOD = (None, 'text')
_HTML_METHOD = (None, 'html')
_XML_METHOD = (None, 'xml')
_XHTML_METHOD = (EXTENSION_NAMESPACE, 'xhtml') #Coming later
_C14N_METHOD = (EXTENSION_NAMESPACE, 'c14n') #Coming later
class proxymethod(object):
__slots__ = ('_name', '_func', '_refs')
def __init__(self, func):
self._name = func.__name__
self._func = func
self._refs = []
def update(self, obj, cls,
_instancemethod=new.instancemethod,
_members=operator.attrgetter('im_func', 'im_self', 'im_class',
'__call__')):
func = getattr(obj, self._name)
try:
func = func.im_func
except AttributeError:
for ref in self._refs:
proxy = ref()
if proxy and proxy.im_self is obj:
class proxyfunction(object):
__call__ = func.__call__
proxy.__class__ = proxyfunction
else:
for ref in self._refs:
proxy = ref()
if proxy and proxy.im_self is obj:
method = _instancemethod(func, obj, cls)
class proxymethod(object):
im_func, im_self, im_class, __call__ = _members(method)
proxy.__class__ = proxymethod
def __get__(self, obj, cls,
_instancemethod=new.instancemethod,
_members=operator.attrgetter('im_func', 'im_self', 'im_class',
'__call__')):
method = _instancemethod(self._func, obj, cls)
class proxymethod(object):
im_func, im_self, im_class, __call__ = _members(method)
proxy = proxymethod()
self._refs.append(weakref.ref(proxy, self._refs.remove))
return proxy
class proxywriter(streamwriter):
_methods = {
_TEXT_METHOD : textwriter.textwriter,
_HTML_METHOD : htmlwriter.htmlwriter,
_XML_METHOD : xmlwriter.xmlwriter,
}
class __metaclass__(type):
def __init__(cls, name, bases, namespace):
cls.__proxymethods__ = tuple(
obj for obj in namespace.itervalues()
if isinstance(obj, proxymethod))
@classmethod
def _lookup(cls, output_parameters):
method = output_parameters.method
try:
cls = cls._methods[method]
except KeyError:
if method[0] is None:
# display only localName if in the null namespace
method = method[1]
raise XsltError(XsltError.UNKNOWN_OUTPUT_METHOD, str(method))
if (cls is xmlwriter.xmlwriter and
output_parameters.cdata_section_elements):
cls = xmlwriter.cdatasectionwriter
return cls
def __new__(cls, output_parameters, stream):
# Attempt to switch to the "true" writer as soon as possible
if output_parameters.method:
return cls._lookup(output_parameters)(output_parameters, stream)
return object.__new__(cls)
def __init__(self, output_parameters, stream):
streamwriter.__init__(self, output_parameters, stream)
self._stack = []
return
def _finalize(self, method):
self.output_parameters.setdefault('method', method)
writer_class = self._lookup(self.output_parameters)
# Save our instance variables for use after reinitializing
stack = self._stack
del self._stack
self.__class__ = writer_class
for proxy in proxywriter.__proxymethods__:
proxy.update(self, writer_class)
# Do the saved callbacks
get_command = self.__getattribute__
for cmd, args, kw in stack:
get_command(cmd)(*args, **kw)
return
@proxymethod
def start_document(self, *args, **kwds):
self._stack.append(('start_document', args, kwds))
return
@proxymethod
def end_document(self, *args, **kw):
# We haven't chosen an output method yet, use default.
self._stack.append(('end_document', args, kw))
self._finalize(_XML_METHOD)
return
@proxymethod
def start_element(self, name, namespace=None, *args, **kw):
self._stack.append(('start_element', (name, namespace) + args, kw))
if namespace is None and name.lower() == 'html':
self._finalize(_HTML_METHOD)
else:
self._finalize(_XML_METHOD)
return
@proxymethod
def end_element(self, *args, **kw):
self._stack.append(('end_element', args, kw))
return
@proxymethod
def namespace(self, *args, **kw):
self._stack.append(('namespace', args, kw))
return
@proxymethod
def attribute(self, *args, **kw):
self._stack.append(('attribute', args, kw))
return
@proxymethod
def text(self, *args, **kw):
self._stack.append(('text', args, kw))
# Non-whitespace characters, cannot be HTML/XHTML
if not isspace(args[0]):
self._finalize(_XML_METHOD)
return
@proxymethod
def processing_instruction(self, *args, **kw):
self._stack.append(('processing_instruction', args, kw))
return
@proxymethod
def comment(self, *args, **kw):
self._stack.append(('comment', args, kw))
return
.. /CmonCrawl-1.0.3.tar.gz/CmonCrawl-1.0.3/docs/source/generated/cmoncrawl.common.types.ExtractorConfig.rst

cmoncrawl.common.types.ExtractorConfig
======================================
.. currentmodule:: cmoncrawl.common.types
.. autoclass:: ExtractorConfig
.. automethod:: __init__
.. rubric:: Methods
.. autosummary::
:toctree:
~ExtractorConfig.__init__
~ExtractorConfig.from_dict
~ExtractorConfig.from_json
~ExtractorConfig.schema
~ExtractorConfig.to_dict
~ExtractorConfig.to_json
.. rubric:: Attributes
.. autosummary::
~ExtractorConfig.since
~ExtractorConfig.to
~ExtractorConfig.name
# /CaptureMock-2.3.0.tar.gz/CaptureMock-2.3.0/capturemock/pythontraffic.py
import sys, types, inspect, re
from pprint import pformat
from threading import RLock
from . import traffic
from .recordfilehandler import RecordFileHandler
from .config import CaptureMockReplayError
class PythonWrapper(object):
def __init__(self, name, target):
self.name = name
self.target = target
self.allInstances[self.name] = self
self.wrappersByInstance[self.getId(self.target)] = self
self.doneFullRepr = False
def addNumericPostfix(self, stem):
num = 1
while stem + str(num) in self.allInstances:
num += 1
return stem + str(num)
@classmethod
def getId(cls, target):
return id(target)
@classmethod
def getWrapperFor(cls, instance, *args):
storedWrapper = cls.wrappersByInstance.get(cls.getId(instance))
return storedWrapper or cls(instance, *args)
@classmethod
def hasWrapper(cls, instance):
return cls.getId(instance) in cls.wrappersByInstance
@classmethod
def resetCaches(cls):
cls.allInstances = {}
cls.wrappersByInstance = {}
def __repr__(self):
if self.doneFullRepr:
return self.name
self.doneFullRepr = True
return self.getFullRepr()
class PythonInstanceWrapper(PythonWrapper):
allInstances = {}
wrappersByInstance = {}
classDescriptions = {}
def __init__(self, instance, classDesc, namingHint=None):
self.classDesc = classDesc
if classDesc not in self.classDescriptions:
self.classDescriptions[classDesc.split("(")[0]] = classDesc
name = self.getNewInstanceName(namingHint)
super(PythonInstanceWrapper, self).__init__(name, instance)
@classmethod
def resetCaches(cls):
super(PythonInstanceWrapper, cls).resetCaches()
cls.classDescriptions = {}
@classmethod
def renameInstance(cls, instanceName, namingHint):
if instanceName in cls.allInstances:
return cls.allInstances[instanceName].rename(namingHint)
def shouldRename(self):
className = self.getClassName()
return self.name.replace(className, "").isdigit()
def rename(self, namingHint):
if self.shouldRename():
del self.allInstances[self.name]
self.name = self.getNewInstanceName(namingHint)
self.allInstances[self.name] = self
return self.name
def getFullRepr(self):
return "Instance(" + repr(self.classDesc) + ", " + repr(self.name) + ")"
def getClassName(self):
return self.classDesc.split("(")[0].lower()
def getNewInstanceName(self, namingHint):
className = self.getClassName()
if namingHint:
className += "_" + namingHint
if className not in self.allInstances:
return className
return self.addNumericPostfix(className)
def createProxy(self, proxy):
return proxy.captureMockCreateInstanceProxy(self.name, self.target, self.classDesc)
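# --- Illustrative sketch (added example, not part of the original module) ---
# Hypothetical example of the instance-naming scheme above: the first wrapper
# for a class is named after the lower-cased class description, later wrappers
# get a numeric postfix. "Connection" is a made-up class description.
def _example_instance_wrapper_names():
    PythonInstanceWrapper.resetCaches()
    first = PythonInstanceWrapper(object(), "Connection")
    second = PythonInstanceWrapper(object(), "Connection")
    return first.name, second.name  # expected: ('connection', 'connection1')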
def isBuiltin(cls):
return cls.__module__ in [ "__builtin__", "builtins" ]
def getFullClassName(cls):
classStr = cls.__name__
if not isBuiltin(cls):
classStr = cls.__module__ + "." + classStr
return classStr
class PythonCallbackWrapper(PythonWrapper):
allInstances = {}
wrappersByInstance = {}
def __init__(self, function, proxy, name):
if name in self.allInstances:
name = self.addNumericPostfix(name)
target = function.captureMockTarget if hasattr(function, "captureMockTarget") else function
super(PythonCallbackWrapper, self).__init__(name, target)
self.proxy = proxy.captureMockCreateInstanceProxy(self.name, self.target, captureMockCallback=True)
def hasExternalName(self):
return isBuiltin(self.target) or "." in self.name
def getFullRepr(self):
return self.name if self.hasExternalName() else "Callback('" + self.name + "')"
@classmethod
def getId(cls, target):
return id(target.__func__) if not hasattr(target, "captureMockTarget") and hasattr(target, "__func__") else id(target)
def createProxy(self, proxy):
if isBuiltin(self.target):
return self.target
else:
return self.proxy
class PythonTraffic(traffic.BaseTraffic):
typeId = "PYT"
direction = "<-"
def getAlterationSectionNames(self):
return [ self.getTextMarker(), "python", "general" ]
def getTextMarker(self):
return self.text
def getExceptionResponse(self, exc_info, inCallback):
exc_value = exc_info[1]
return PythonResponseTraffic(self.getExceptionText(exc_value), inCallback=inCallback)
def getExceptionText(self, exc_value):
text = "raise " + getFullClassName(exc_value.__class__) + "(" + repr(str(exc_value)) + ")"
return self.applyAlterations(text)
class PythonImportTraffic(PythonTraffic):
def __init__(self, inText, *args):
self.moduleName = inText
text = "import " + self.moduleName
super(PythonImportTraffic, self).__init__(text, *args)
def isMarkedForReplay(self, replayItems, responses):
return self.getDescription() in responses
class ReprObject:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return self.arg
class DictForRepr(object):
def __init__(self, thedict):
self.thedict = thedict
def __getattr__(self, name):
return getattr(self.thedict, name)
def __getitem__(self, *args):
return self.thedict.__getitem__(*args)
def __setitem__(self, *args):
return self.thedict.__setitem__(*args)
def __repr__(self):
return pformat(self.thedict, width=130)
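# --- Illustrative sketch (added example, not part of the original module) ---
# Hypothetical demonstration of the two repr helpers above: ReprObject injects
# verbatim text into a recorded repr, while DictForRepr pretty-prints dicts via
# pformat so recorded traffic stays readable.
def _example_repr_helpers():
    raw = ReprObject("some_proxy.method()")
    wrapped = DictForRepr({"a": 1, "b": [1, 2, 3]})
    return repr(raw), repr(wrapped)  # ("some_proxy.method()", "{'a': 1, 'b': [1, 2, 3]}")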
def extendDirection(direction):
extra = "----"
return direction + extra if direction.startswith("<") else extra + direction
class PythonModuleTraffic(PythonTraffic):
def __init__(self, text, rcHandler, interceptModules, inCallback):
super(PythonModuleTraffic, self).__init__(text, rcHandler)
self.interceptModules = interceptModules
if inCallback:
self.direction = extendDirection(self.direction)
def getModuleName(self, obj):
if hasattr(obj, "__module__"): # classes, functions, many instances
return obj.__module__
else:
return self.getClass(obj).__module__ # many other instances
def getClass(self, obj):
return obj.__class__ if hasattr(obj, "__class__") else type(obj)
def insertReprObjects(self, arg):
if hasattr(arg, "captureMockProxyName"):
return ReprObject(arg.captureMockProxyName)
elif isinstance(arg, float):
# Stick to 2 dp for recording floating point values
return ReprObject(str(round(arg, 2)))
elif isinstance(arg, dict):
return DictForRepr(arg)
else:
return ReprObject(self.fixMultilineStrings(arg))
def isCallableType(self, obj):
cacheTypes = (types.FunctionType, types.GeneratorType, types.MethodType, types.BuiltinFunctionType,
type, types.ModuleType)
if sys.version_info[0] == 2:
cacheTypes += (types.ClassType,)
return type(obj) in cacheTypes or hasattr(obj, "__call__")
def isMarkedForReplay(self, replayItems, responses):
fullTextMarker = self.direction + self.typeId + ":" + self.getTextMarker()
return any((item == fullTextMarker or item.startswith(fullTextMarker + "(") for item in responses))
def getIntercept(self, modOrAttr):
if modOrAttr in self.interceptModules:
return modOrAttr
elif "." in modOrAttr:
return self.getIntercept(modOrAttr.rsplit(".", 1)[0])
def isBasicType(self, obj):
basicTypes = (bool, float, int, str, list, dict, tuple)
if sys.version_info[0] == 2:
basicTypes += (long, unicode)
return obj is None or obj is NotImplemented or type(obj) in basicTypes
def isIterator(self, obj):
return hasattr(obj, "__iter__") and (hasattr(obj, "next") or hasattr(obj, "__next__"))
def getResultText(self, result):
text = repr(self.transformStructure(result, self.insertReprObjects))
return self.applyAlterations(text)
def transformResponse(self, response, proxy):
wrappedValue = self.transformStructure(response, self.addInstanceWrapper, responseIsBasic=self.isBasicType(response))
responseText = self.getResultText(wrappedValue)
transformedResponse = self.transformStructure(wrappedValue, self.insertProxy, proxy)
return responseText, transformedResponse
def transformStructure(self, result, transformMethod, *args, **kw):
if type(result) in (list, tuple):
return type(result)([ self.transformStructure(elem, transformMethod, *args, **kw) for elem in result ])
elif type(result) == dict:
newResult = {}
for key, value in result.items():
newResult[key] = self.transformStructure(value, transformMethod, *args, **kw)
return transformMethod(newResult, *args, **kw)
else:
return transformMethod(result, *args, **kw)
def addInstanceWrapper(self, result, **kw):
# We add wrappers if we aren't already a proxy, if we're a complex type, and if we're either being intercepted, or an iterator
# Iterators returned from proxy operations do not follow repr-eval, so we need to intercept them too
if not hasattr(result, "captureMockProxyName") and not self.isBasicType(result) and \
(self.getIntercept(self.getModuleName(result)) or self.isIterator(result)):
return self.getWrapper(result, **kw)
else:
return result
def insertProxy(self, result, proxy):
if isinstance(result, PythonInstanceWrapper):
return result.createProxy(proxy)
else:
return result
def instanceHasAttribute(self, instance, attr):
# hasattr fails if the intercepted instance defines __getattr__, when it always returns True
# dir() can throw exceptions if __dir__ does (instance can be anything at all, and its __dir__ might raise anything at all)
try:
return attr in dir(instance)
except:
return False
def getWrapper(self, instance, namingHint=None, **kw):
# hasattr fails if the intercepted instance defines __getattr__, when it always returns True
if self.instanceHasAttribute(instance, "captureMockTarget"):
return self.getWrapper(instance.captureMockTarget, namingHint=namingHint)
classDesc = self.getClassDescription(self.getClass(instance))
return PythonInstanceWrapper.getWrapperFor(instance, classDesc, namingHint)
def getClassDescription(self, cls):
if cls.__name__ in PythonInstanceWrapper.classDescriptions:
return cls.__name__
baseClasses = self.findRelevantBaseClasses(cls)
if len(baseClasses):
return cls.__name__ + "(" + ", ".join(baseClasses) + ")"
else:
return cls.__name__
def findRelevantBaseClasses(self, cls):
classes = []
for baseClass in inspect.getmro(cls)[1:]:
name = baseClass.__name__
if self.getIntercept(baseClass.__module__):
classes.append(name)
else:
name = getFullClassName(baseClass)
# No point recording 'object' in Python3: everything is an object there
if name != "object" or sys.version_info[0] == 2:
classes.append(name)
break
return classes
class PythonAttributeTraffic(PythonModuleTraffic):
cachedAttributes = {}
cachedInstances = {}
@classmethod
def resetCaches(cls):
cls.cachedAttributes = {}
cls.cachedInstances = {}
def shouldUpdateCache(self, obj):
if self.isBasicType(obj):
if self.text in self.cachedAttributes:
cachedObj = self.cachedAttributes.get(self.text)
if cachedObj == obj:
return False
self.cachedAttributes[self.text] = obj
return True
else:
return self.text not in self.cachedInstances
def shouldCache(self, obj):
return not self.isCallableType(obj)
def getWrapper(self, instance, namingHint=None, responseIsBasic=True):
wrapper = PythonModuleTraffic.getWrapper(self, instance, namingHint=namingHint)
return wrapper if responseIsBasic else self.cachedInstances.setdefault(self.text, wrapper)
class PythonSetAttributeTraffic(PythonModuleTraffic):
def __init__(self, rcHandler, interceptModules, inCallback, proxyName, attrName, value):
text = proxyName + "." + attrName + " = " + repr(self.insertReprObjects(value))
super(PythonSetAttributeTraffic, self).__init__(text, rcHandler, interceptModules, inCallback)
class PythonFunctionCallTraffic(PythonModuleTraffic):
cachedFunctions = set()
def __init__(self, functionName, rcHandler, interceptModules, proxy, inCallback, *args, **kw):
self.interceptModules = interceptModules
if proxy.captureMockCallback:
self.direction = "--->"
self.functionName = functionName
self.args = args
self.kw = kw # Prevent naming hints being added when transforming arguments
self.args = self.transformStructure(args, self.transformArg, proxy)
self.kw = self.transformStructure(kw, self.transformArg, proxy)
argsForRecord = self.transformStructure(list(self.args), self.insertReprObjects)
keywForRecord = self.transformStructure(self.kw, self.insertReprObjects)
for key in sorted(keywForRecord.keys()):
value = keywForRecord[key]
recordArg = ReprObject(key + "=" + repr(value))
argsForRecord.append(recordArg)
text = functionName + "(" + repr(argsForRecord)[1:-1] + ")"
super(PythonFunctionCallTraffic, self).__init__(text, rcHandler, interceptModules, inCallback)
self.text = self.applyAlterations(self.text)
checkRepeats = rcHandler.getboolean("check_repeated_calls", [ self.getIntercept(self.functionName), "python" ], True)
if checkRepeats:
self.shouldRecord = True
else:
self.shouldRecord = self.functionName not in self.cachedFunctions
self.cachedFunctions.add(self.functionName)
def getTextMarker(self):
return self.functionName
def transformArg(self, arg, proxy):
if proxy.captureMockCallback:
return self.addInstanceWrapper(arg)
elif self.isRealArgCallable(arg) and not self.getIntercept(self.getModuleName(arg)):
return PythonCallbackWrapper.getWrapperFor(arg, proxy, self.getCallbackName(arg))
else:
return arg
def getCallbackName(self, arg):
if hasattr(arg, "captureMockTarget"):
return arg.captureMockProxyName
elif isBuiltin(arg):
return arg.__name__
callbackName = self.functionName.split(".")[0]
hint = self.getNamingHint()
if hint:
callbackName += "_" + hint
callbackName += "_callback"
return callbackName
def isRealArgCallable(self, arg):
if hasattr(arg, "captureMockTarget"):
return self.isCallableType(arg.captureMockTarget)
else:
return self.isCallableType(arg)
def switchProxies(self, arg, captureMockProxy):
# we need our proxies present in system calls, and absent in real calls to intercepted code
# So we remove them from normal function calls, and add them in callbacks
if "<" in self.direction:
if hasattr(arg, "captureMockTarget"):
if not arg.captureMockCallback:
return arg.captureMockTarget
elif isinstance(arg, PythonCallbackWrapper):
return arg.createProxy(captureMockProxy)
else:
if isinstance(arg, PythonInstanceWrapper):
return self.insertProxy(arg, captureMockProxy)
return arg
# Naming to avoid clashes as args and kw come from the application
def callRealFunction(self, captureMockFunction, captureMockRecordHandler, captureMockProxy):
realArgs = self.transformStructure(self.args, self.switchProxies, captureMockProxy)
realKw = self.transformStructure(self.kw, self.switchProxies, captureMockProxy)
try:
return captureMockFunction(*realArgs, **realKw)
except:
exc_value = sys.exc_info()[1]
moduleName = self.getModuleName(exc_value)
if self.getIntercept(moduleName):
# We own the exception object also, handle it like an ordinary instance
wrapper = self.getWrapper(exc_value)
responseText = "raise " + repr(wrapper)
PythonResponseTraffic(responseText).record(captureMockRecordHandler)
raise self.insertProxy(wrapper, captureMockProxy)
else:
responseText = self.getExceptionText(exc_value)
PythonResponseTraffic(responseText).record(captureMockRecordHandler)
raise
def tryRenameInstance(self, proxy, recordHandler):
if self.functionName.count(".") == 1:
objName, localName = self.functionName.split(".")
if localName.startswith("set") or localName.startswith("Set"):
hint = self.getNamingHint()
if hint:
newName = PythonInstanceWrapper.renameInstance(objName, hint)
if newName is not None:
proxy.captureMockNameFinder.rename(objName, newName)
recordHandler.rerecord(objName, newName)
def makePythonName(self, arg):
# Swiped from http://stackoverflow.com/questions/3303312/how-do-i-convert-a-string-to-a-valid-variable-name-in-python
return re.sub(r'\W|^(?=\d)', '_', arg.strip().lower())
def getNamingHint(self):
def isSuitable(arg):
return isinstance(arg, str) and "\n" not in arg and len(arg) < 20 # Don't use long arguments
for arg in self.args:
if isSuitable(arg):
return self.makePythonName(arg)
for _, arg in sorted(self.kw.items()):
if isSuitable(arg):
return self.makePythonName(arg)
def getWrapper(self, instance, **kw):
return PythonModuleTraffic.getWrapper(self, instance, namingHint=self.getNamingHint())
class PythonResponseTraffic(traffic.BaseTraffic):
typeId = "RET"
direction = "->"
def __init__(self, text, rcHandler=None, callback=False, inCallback=False):
if callback:
self.direction = "<---"
elif inCallback:
self.direction = extendDirection(self.direction)
super(PythonResponseTraffic, self).__init__(text, rcHandler)
class PythonTrafficHandler:
def __init__(self, replayInfo, recordFile, rcHandler, callStackChecker, interceptModules):
self.replayInfo = replayInfo
self.recordFileHandler = RecordFileHandler(recordFile)
self.callStackChecker = callStackChecker
self.rcHandler = rcHandler
self.interceptModules = interceptModules
self.lock = RLock()
PythonInstanceWrapper.resetCaches() # reset, in case of previous tests
PythonCallbackWrapper.resetCaches()
PythonAttributeTraffic.resetCaches()
def importModule(self, name, proxy, loadModule):
with self.lock:
traffic = PythonImportTraffic(name, self.rcHandler)
if self.callStackChecker.callerExcluded(stackDistance=3):
return loadModule(name)
self.record(traffic)
if self.replayInfo.isActiveFor(traffic):
return self.processReplay(traffic, proxy)
else:
try:
return self.callStackChecker.callNoInterception(False, loadModule, name)
except:
response = traffic.getExceptionResponse(sys.exc_info(), self.callStackChecker.inCallback)
self.record(response)
raise
def record(self, traffic):
traffic.record(self.recordFileHandler, truncationPoint="Instance('" in traffic.text)
def evaluateForReplay(self, traffic, proxy):
if isinstance(traffic, PythonResponseTraffic) or not "." in traffic.text:
return proxy.captureMockEvaluate(traffic.text)
else:
return self.processReplay(traffic, proxy, callback=True)
def processReplay(self, traffic, proxy, record=True, **kw):
lastResponse = None
for responseClass, responseText in self.getReplayResponses(traffic):
if responseClass is PythonResponseTraffic:
responseTraffic = responseClass(responseText, inCallback=self.callStackChecker.inCallback, **kw)
else:
responseTraffic = responseClass(responseText)
responseTraffic.direction = "--->"
if record:
self.record(responseTraffic)
lastResponse = self.evaluateForReplay(responseTraffic, proxy)
return lastResponse
def getReplayResponses(self, traffic, **kw):
return self.replayInfo.readReplayResponses(traffic, [ PythonTraffic, PythonResponseTraffic ], **kw)
def getRealAttribute(self, target, attrName):
if attrName == "__all__":
# Need to provide something here, the application has probably called 'from module import *'
return [x for x in dir(target) if not x.startswith("__")]
else:
return getattr(target, attrName)
def getAttribute(self, proxyName, attrName, proxy, proxyTarget):
with self.lock:
fullAttrName = proxyName + "." + attrName
if self.callStackChecker.callerExcluded(stackDistance=3):
if proxyTarget is None:
proxyTarget = proxy.captureMockLoadRealModule()
return self.getRealAttribute(proxyTarget, attrName)
else:
traffic = PythonAttributeTraffic(fullAttrName, self.rcHandler, self.interceptModules, self.callStackChecker.inCallback)
if self.replayInfo.isActiveFor(traffic):
return self.getAttributeFromReplay(traffic, proxyTarget, attrName, proxy, fullAttrName)
else:
return self.getAndRecordRealAttribute(traffic, proxyTarget, attrName, proxy, fullAttrName)
def getAttributeFromReplay(self, traffic, proxyTarget, attrName, proxy, fullAttrName):
responses = self.getReplayResponses(traffic, exact=True)
if len(responses):
firstText = responses[0][1]
if traffic.shouldUpdateCache(firstText):
self.record(traffic)
self.recordResponse(firstText)
return proxy.captureMockEvaluate(firstText)
else:
newTarget = getattr(proxyTarget, attrName) if proxyTarget else None
if newTarget is None:
classDesc = self.getReplayClassDefinition(fullAttrName)
if classDesc is not None:
return proxy.captureMockCreateClassProxy(fullAttrName, newTarget, classDesc)
return self.createInstanceProxy(proxy, fullAttrName, newTarget)
def getReplayClassDefinition(self, fullAttrName):
response = self.replayInfo.findResponseToTrafficStartingWith(fullAttrName + "(")
if response is not None and response.startswith("Instance"):
def Instance(classDesc, instanceName):
return classDesc
return eval(response)
def getReplayInstanceName(self, text, proxy):
def Instance(classDesc, instanceName):
return instanceName
if text.startswith("raise "):
proxy.captureMockEvaluate(text) # raise the exception
else:
return eval(text)
def getAndRecordRealAttribute(self, traffic, proxyTarget, attrName, proxy, fullAttrName):
try:
realAttr = self.callStackChecker.callNoInterception(False, self.getRealAttribute, proxyTarget, attrName)
except:
responseTraffic = traffic.getExceptionResponse(sys.exc_info(), self.callStackChecker.inCallback)
if traffic.shouldUpdateCache(responseTraffic.text):
self.record(traffic)
self.record(responseTraffic)
raise
if traffic.shouldCache(realAttr):
if traffic.shouldUpdateCache(realAttr):
self.record(traffic)
if attrName == "__path__":
# don't want to record the real absolute path, which is just hard to filter
# and won't work for lookup anyway
self.recordResponse("['Fake value just to mark that it exists']")
return realAttr
else:
return self.transformResponse(traffic, realAttr, proxy)
else:
return traffic.transformResponse(realAttr, proxy)[1]
else:
if issubclass(type(realAttr), type) or (sys.version_info[0] == 2 and type(realAttr) is types.ClassType):
classDesc = traffic.getClassDescription(realAttr)
return proxy.captureMockCreateClassProxy(fullAttrName, realAttr, classDesc)
else:
return self.createInstanceProxy(proxy, fullAttrName, realAttr)
def createInstanceProxy(self, proxy, fullAttrName, realAttr):
if isinstance(proxy, type):
return proxy.__class__.captureMockCreateInstanceProxy(proxy, fullAttrName, realAttr)
else:
return proxy.captureMockCreateInstanceProxy(fullAttrName, realAttr)
def recordResponse(self, responseText, callback=False):
if responseText != "None":
self.record(PythonResponseTraffic(responseText, callback=callback, inCallback=self.callStackChecker.inCallback))
def transformResponse(self, traffic, response, proxy):
responseText, transformedResponse = traffic.transformResponse(response, proxy)
self.recordResponse(responseText, proxy.captureMockCallback)
return transformedResponse
# Parameter names chosen to avoid potential clashes with args and kw which come from the app
def callFunction(self, captureMockProxyName, captureMockProxy, captureMockFunction, *args, **kw):
with self.lock:
isCallback = captureMockProxy.captureMockCallback
if not self.callStackChecker.callerExcluded(stackDistance=3, callback=isCallback):
traffic = PythonFunctionCallTraffic(captureMockProxyName, self.rcHandler,
self.interceptModules, captureMockProxy,
self.callStackChecker.inCallback, *args, **kw)
replayActive = self.replayInfo.isActiveFor(traffic)
if traffic.shouldRecord and (not isCallback or not replayActive):
self.record(traffic)
if not isCallback and replayActive:
return self.processReplay(traffic, captureMockProxy, traffic.shouldRecord)
else:
traffic.tryRenameInstance(captureMockProxy, self.recordFileHandler)
return self.callRealFunction(traffic, captureMockFunction, captureMockProxy)
# Excluded. Important not to hold the lock while this goes on, it might be time.sleep for example
return captureMockFunction(*args, **kw)
def callRealFunction(self, captureMockTraffic, captureMockFunction, captureMockProxy):
realRet = self.callStackChecker.callNoInterception(captureMockProxy.captureMockCallback,
captureMockTraffic.callRealFunction,
captureMockFunction, self.recordFileHandler,
captureMockProxy)
if captureMockTraffic.shouldRecord:
return self.transformResponse(captureMockTraffic, realRet, captureMockProxy)
else:
return captureMockTraffic.transformResponse(realRet, captureMockProxy)[1]
# Parameter names chosen to avoid potential clashes with args and kw which come from the app
def callConstructor(self, captureMockClassName, captureMockRealClass, captureMockProxy,
*args, **kw):
with self.lock:
traffic = PythonFunctionCallTraffic(captureMockClassName, self.rcHandler,
self.interceptModules, captureMockProxy,
self.callStackChecker.inCallback, *args, **kw)
if self.callStackChecker.callerExcluded(stackDistance=3):
realObj = captureMockRealClass(*args, **kw)
wrapper = traffic.getWrapper(realObj)
return wrapper.name, realObj
self.record(traffic)
if self.replayInfo.isActiveFor(traffic):
responses = self.getReplayResponses(traffic)
if len(responses):
firstText = responses[0][1]
self.recordResponse(firstText)
return self.getReplayInstanceName(firstText, captureMockProxy), None
else:
raise CaptureMockReplayError("Could not match sufficiently well to construct object of type '" + captureMockClassName + "'")
else:
realObj = self.callStackChecker.callNoInterception(False, traffic.callRealFunction,
captureMockRealClass, self.recordFileHandler,
captureMockProxy)
wrapper = traffic.getWrapper(realObj)
self.recordResponse(repr(wrapper))
return wrapper.name, realObj
def recordSetAttribute(self, *args):
if not self.callStackChecker.callerExcluded(stackDistance=3):
traffic = PythonSetAttributeTraffic(self.rcHandler, self.interceptModules, self.callStackChecker.inCallback, *args)
self.record(traffic)
# /101703373_topsis-1.0.0-py3-none-any.whl/topsis/101703373-topsis.py
import sys
import os
import pandas as pd
import math
import numpy as np
class Topsis:
def __init__(self, filename):
if os.path.isdir(filename):
head_tail = os.path.split(filename)
data = pd.read_csv(head_tail[1])
if os.path.isfile(filename):
data = pd.read_csv(filename)
self.d = data.iloc[1:,1:].values
self.features = len(self.d[0])
self.samples = len(self.d)
def fun(self,a):
return a[1]
def fun2(self,a):
return a[0]
def evaluate(self,w = None,im = None):
d = self.d
features = self.features
samples = self.samples
if w==None:
w=[1]*features
if im==None:
im=["+"]*features
ideal_best=[]
ideal_worst=[]
for i in range(0,features):
k = math.sqrt(sum(d[:,i]*d[:,i]))
maxx = 0
minn = 1
for j in range(0,samples):
d[j,i] = (d[j,i]/k)*w[i]
if d[j,i]>maxx:
maxx = d[j,i]
if d[j,i]<minn:
minn = d[j,i]
if im[i] == "+":
ideal_best.append(maxx)
ideal_worst.append(minn)
else:
ideal_best.append(minn)
ideal_worst.append(maxx)
p = []
for i in range(0,samples):
a = math.sqrt(sum((d[i]-ideal_worst)*(d[i]-ideal_worst)))
b = math.sqrt(sum((d[i]-ideal_best)*(d[i]-ideal_best)))
lst = []
lst.append(i)
lst.append(a/(a+b))
p.append(lst)
p.sort(key=self.fun)
rank = 1
for i in range(samples-1,-1,-1):
p[i].append(rank)
rank+=1
p.sort(key=self.fun2)
return p
def findTopsis(filename,w,i):
ob = Topsis(filename)
res = ob.evaluate(w,i)
print(res)
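# --- Illustrative sketch (added example, not part of the original script) ---
# Hypothetical programmatic use of the Topsis class above: "data.csv" is a
# made-up file name for a decision matrix with four criteria; the weights and
# impacts are illustrative values only.
def example_topsis_usage():
    weights = [1, 1, 1, 1]
    impacts = ["+", "+", "-", "+"]
    ob = Topsis("data.csv")
    return ob.evaluate(weights, impacts)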
def main():
lst = sys.argv
length = len(lst)
if length != 4:
print("Wrong number of parameters")
else:
w = list(map(int,lst[2].split(',')))
i = lst[3].split(',')
ob = Topsis(lst[1])
res = ob.evaluate(w,i)
print (res)
if __name__ == '__main__':
main()
# /FlexGet-3.9.6-py3-none-any.whl/flexget/components/imdb/utils.py
import difflib
import json
import random
import re
from urllib.parse import quote
from loguru import logger
from flexget import plugin
from flexget.utils.requests import Session, TimedLimiter
from flexget.utils.soup import get_soup
from flexget.utils.tools import str_to_int
logger = logger.bind(name='imdb.utils')
requests = Session()
# Declare browser user agent to avoid being classified as a bot and getting a 403
requests.headers.update(
{'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0'}
)
# requests.headers.update({'User-Agent': random.choice(USERAGENTS)})
# this makes most of the titles to be returned in english translation, but not all of them
requests.headers.update({'Accept-Language': 'en-US,en;q=0.8'})
requests.headers.update(
{'X-Forwarded-For': '24.110.%d.%d' % (random.randint(0, 254), random.randint(0, 254))}
)
# give imdb a little break between requests (see: http://flexget.com/ticket/129#comment:1)
requests.add_domain_limiter(TimedLimiter('imdb.com', '3 seconds'))
def is_imdb_url(url):
"""Tests the url to see if it's for imdb.com."""
if not isinstance(url, str):
return
# Probably should use urlparse.
return re.match(r'https?://[^/]*imdb\.com/', url)
def is_valid_imdb_title_id(value):
"""
Return True if `value` is a valid IMDB ID for titles (movies, series, etc).
"""
if not isinstance(value, str):
raise TypeError(f"is_valid_imdb_title_id expects a string but got {type(value)}")
# IMDB IDs for titles have 'tt' followed by 7 or 8 digits
return re.match(r'tt\d{7,8}', value) is not None
def is_valid_imdb_person_id(value):
"""
Return True if `value` is a valid IMDB ID for a person.
"""
if not isinstance(value, str):
raise TypeError(f"is_valid_imdb_person_id expects a string but got {type(value)}")
# An IMDB ID for a person is formed by 'nm' followed by 7 or 8 digits
return re.match(r'nm\d{7,8}', value) is not None
def extract_id(url):
"""Return IMDb ID of the given URL. Return None if not valid or if URL is not a string."""
if not isinstance(url, str):
return
m = re.search(r'((?:nm|tt)\d{7,8})', url)
if m:
return m.group(1)
def make_url(imdb_id):
"""Return IMDb URL of the given ID"""
return 'https://www.imdb.com/title/%s/' % imdb_id
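# --- Illustrative sketch (added example, not part of the original module) ---
# Hypothetical demonstration of the id helpers above: a messy IMDb URL is
# reduced to its bare title id and expanded back to a canonical URL. The URL
# below is only an example value.
def _example_imdb_id_roundtrip():
    url = 'https://www.imdb.com/title/tt0111161/?ref_=fn_al_tt_1'
    imdb_id = extract_id(url)  # -> 'tt0111161'
    assert is_valid_imdb_title_id(imdb_id)
    return make_url(imdb_id)  # -> 'https://www.imdb.com/title/tt0111161/'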
class ImdbSearch:
def __init__(self):
# de-prioritize aka matches a bit
self.aka_weight = 0.95
# prioritize first
self.first_weight = 1.5
self.min_match = 0.6
self.min_diff = 0.01
self.debug = False
self.max_results = 50
def ireplace(self, text, old, new, count=0):
"""Case insensitive string replace"""
pattern = re.compile(re.escape(old), re.I)
return re.sub(pattern, new, text, count)
def smart_match(self, raw_name, single_match=True):
"""Accepts messy name, cleans it and uses information available to make smartest and best match"""
parser = plugin.get('parsing', 'imdb_search').parse_movie(raw_name)
name = parser.name
year = parser.year
if not name:
logger.critical('Failed to parse name from {}', raw_name)
return None
logger.debug('smart_match name={} year={}', name, str(year))
return self.best_match(name, year, single_match)
def best_match(self, name, year=None, single_match=True):
"""Return single movie that best matches name criteria or None"""
movies = self.search(name, year)
if not movies:
logger.debug('search did not return any movies')
return None
# remove all movies below min_match, and different year
exact = []
for movie in movies[:]:
if year and movie.get('year'):
if movie['year'] != year:
logger.debug(
'best_match removing {} - {} (wrong year: {})',
movie['name'],
movie['url'],
str(movie['year']),
)
movies.remove(movie)
continue
# Look for exact match
if movie['year'] == year and movie['name'].lower() == name.lower():
exact.append(movie)
if movie['match'] < self.min_match:
logger.debug('best_match removing {} (min_match)', movie['name'])
movies.remove(movie)
continue
if not movies:
logger.debug('FAILURE: no movies remain')
return None
# If we have 1 exact match
if len(exact) == 1:
logger.debug('SUCCESS: found exact movie match')
return exact[0]
# if only one remains ..
if len(movies) == 1:
logger.debug('SUCCESS: only one movie remains')
return movies[0]
# check min difference between best two hits
diff = movies[0]['match'] - movies[1]['match']
if diff < self.min_diff:
logger.debug(
'unable to determine correct movie, min_diff too small (`{}` <-?-> `{}`)',
movies[0],
movies[1],
)
for m in movies:
logger.debug('remain: {} (match: {}) {}', m['name'], m['match'], m['url'])
return None
else:
return movies[0] if single_match else movies
def search(self, name, year=None):
"""Return array of movie details (dict)"""
logger.debug('Searching: {}', name)
# This may include Shorts and TV series in the results
# It is using the live search suggestions api that populates movies as you type in the search bar
search_imdb_id = extract_id(name)
search = name
# Adding the year to the search normally improves the results, except in the case that the
# title of the movie is a number e.g. 1917 (2009)
if year and not name.isdigit():
search += f" {year}"
url = f'https://v3.sg.media-imdb.com/suggestion/titles/x/{quote(search, safe="")}.json'
params = {'includeVideos': 0}
logger.debug('Search query: {}', repr(url))
page = requests.get(url, params=params)
rows = page.json()['d']
movies = []
for count, result in enumerate(rows):
# Title search gives a lot of results, only check the first ones
if count > self.max_results:
break
if result['qid'] not in ['tvMovie', 'movie', 'video']:
logger.debug('skipping {}', result['l'])
continue
movie = {
'name': result['l'],
'year': result.get('y'),
'imdb_id': result['id'],
'url': make_url(result['id']),
'thumbnail': result.get('i', {}).get('imageUrl'),
}
if search_imdb_id and movie['imdb_id'] == search_imdb_id:
movie['match'] = 1.0
return [movie]
logger.debug('processing name: {} url: {}', movie['name'], movie['url'])
# calc & set best matching ratio
seq = difflib.SequenceMatcher(lambda x: x == ' ', movie['name'].title(), name.title())
ratio = seq.ratio()
# prioritize items by position
position_ratio = (self.first_weight - 1) / (count + 1) + 1
logger.debug(
'- prioritizing based on position {} `{}`: {}', count, movie['url'], position_ratio
)
ratio *= position_ratio
# store ratio
movie['match'] = ratio
movies.append(movie)
movies.sort(key=lambda x: x['match'], reverse=True)
return movies
class ImdbParser:
"""Quick-hack to parse relevant imdb details"""
def __init__(self):
self.genres = []
self.languages = []
self.actors = {}
self.directors = {}
self.writers = {}
self.score = 0.0
self.votes = 0
self.meta_score = 0
self.year = 0
self.plot_outline = None
self.name = None
self.original_name = None
self.url = None
self.imdb_id = None
self.photo = None
self.mpaa_rating = ''
self.plot_keywords = []
def __str__(self):
return f'<ImdbParser(name={self.name},imdb_id={self.imdb_id})>'
def parse(self, imdb_id, soup=None):
self.imdb_id = extract_id(imdb_id)
url = make_url(self.imdb_id)
self.url = url
if not soup:
page = requests.get(url)
soup = get_soup(page.text)
data = json.loads(soup.find('script', {'type': 'application/ld+json'}).string)
if not data:
raise plugin.PluginError(
'IMDB parser needs updating, imdb format changed. Please report on Github.'
)
props_data = json.loads(soup.find('script', {'type': 'application/json'}).string)
if (
not props_data
or not props_data.get('props')
or not props_data.get('props').get('pageProps')
):
raise plugin.PluginError(
'IMDB parser needs updating, imdb props_data format changed. Please report on Github.'
)
above_the_fold_data = props_data['props']['pageProps'].get('aboveTheFoldData')
if not above_the_fold_data:
raise plugin.PluginError(
'IMDB parser needs updating, imdb above_the_fold_data format changed. Please report on Github.'
)
title = above_the_fold_data.get('titleText')
if title:
self.name = title.get('text')
if not self.name:
raise plugin.PluginError(
'IMDB parser needs updating, imdb above_the_fold_data format changed for title. Please report on Github.'
)
original_name = above_the_fold_data.get('originalTitleText')
if original_name:
self.original_name = original_name.get('text')
if not self.original_name:
logger.debug('No original title found for {}', self.imdb_id)
# NOTE: We cannot use the get default approach here .(get(x, {}))
# as the data returned in imdb has all fields with null values if they do not exist.
if above_the_fold_data.get('releaseYear'):
self.year = above_the_fold_data['releaseYear'].get('year')
if not self.year:
logger.debug('No year found for {}', self.imdb_id)
self.mpaa_rating = data.get('contentRating')
if not self.mpaa_rating:
logger.debug('No rating found for {}', self.imdb_id)
self.photo = data.get('image')
if not self.photo:
logger.debug('No photo found for {}', self.imdb_id)
rating_data = data.get('aggregateRating')
if rating_data:
rating_count = rating_data.get('ratingCount')
if rating_count:
self.votes = (
str_to_int(rating_count) if not isinstance(rating_count, int) else rating_count
)
else:
logger.debug('No votes found for {}', self.imdb_id)
score = rating_data.get('ratingValue')
if score:
self.score = float(score)
else:
logger.debug('No score found for {}', self.imdb_id)
meta_critic = above_the_fold_data.get('metacritic')
if meta_critic:
meta_score = meta_critic.get('metascore')
if meta_score:
self.meta_score = meta_score.get('score')
if not self.meta_score:
logger.debug('No Metacritic score found for {}', self.imdb_id)
# get director(s)
directors = data.get('director', [])
if not isinstance(directors, list):
directors = [directors]
for director in directors:
if director['@type'] != 'Person':
continue
director_id = extract_id(director['url'])
director_name = director['name']
self.directors[director_id] = director_name
# get writer(s)
writers = data.get('creator', [])
if not isinstance(writers, list):
writers = [writers]
for writer in writers:
if writer['@type'] != 'Person':
continue
writer_id = extract_id(writer['url'])
writer_name = writer['name']
self.writers[writer_id] = writer_name
# Details section
main_column_data = props_data['props']['pageProps'].get('mainColumnData')
if not main_column_data:
raise plugin.PluginError(
'IMDB parser needs updating, imdb main_column_data format changed. Please report on Github.'
)
for language in (main_column_data.get('spokenLanguages') or {}).get('spokenLanguages', []):
self.languages.append(language['text'].lower())
# Storyline section
# NOTE: We cannot use the get default approach here .(get(x, {}))
# as the data returned in imdb has all fields with null values if they do not exist.
plot = above_the_fold_data['plot'] or {}
plot_text = plot.get('plotText') or {}
plot_plain_text = plot_text.get('plainText')
if plot_plain_text:
self.plot_outline = plot_plain_text
if not self.plot_outline:
logger.debug('No storyline found for {}', self.imdb_id)
storyline_keywords = data.get('keywords') or ''
if storyline_keywords:
self.plot_keywords = storyline_keywords.split(',')
genres = (above_the_fold_data.get('genres', {}) or {}).get('genres', [])
self.genres = [g['text'].lower() for g in genres]
# Cast section
cast_data = main_column_data.get('cast', {}) or {}
for cast_node in cast_data.get('edges') or []:
actor_node = (cast_node.get('node') or {}).get('name') or {}
actor_id = actor_node.get('id')
actor_name = (actor_node.get('nameText') or {}).get('text')
if actor_id and actor_name:
self.actors[actor_id] = actor_name
principal_cast_data = main_column_data.get('principalCast', []) or []
if principal_cast_data:
for cast_node in principal_cast_data[0].get('credits') or []:
actor_node = cast_node.get('name') or {}
actor_id = actor_node.get('id')
actor_name = (actor_node.get('nameText') or {}).get('text')
if actor_id and actor_name:
self.actors[actor_id] = actor_name
# /Flask-AppFactory-0.2.1.tar.gz/Flask-AppFactory-0.2.1/flask_appfactory/app.py
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import re
import sys
import warnings
import ast
from flask import Flask
from flask_cli import FlaskCLI
from flask_registry import BlueprintAutoDiscoveryRegistry, \
ConfigurationRegistry, ExtensionRegistry, PackageRegistry, Registry
def configure_warnings():
"""Configure warnings by routing warnings to the logging system.
It also unhides ``DeprecationWarning``.
"""
if not sys.warnoptions:
# Route warnings through python logging
logging.captureWarnings(True)
# DeprecationWarning is by default hidden, hence we force the
# "default" behavior on deprecation warnings which is not to hide
# errors.
warnings.simplefilter("default", DeprecationWarning)
def load_config(app, module_name, **kwargs_config):
"""Load configuration.
Configuration is loaded in the following order:
1. Configuration module (i.e. ``module_name``).
2. Instance configuration in ``<instance folder>/<app name>.cfg``
3. Keyword configuration arguments.
4. Environment variables listed in the ``<app name>_APP_CONFIG_ENVS``
configuration variable, or in an environment variable of the same name
(given as a comma separated list of variable names).
Additionally checks if ``SECRET_KEY`` is set in the configuration and warns
if it is not.
:param app: Flask application.
:param module_name: Configuration module.
:param kwargs_config: Configuration keyword arguments
"""
# 1. Load site specific default configuration
if module_name:
app.config.from_object(module_name)
# 2. Load <app name>.cfg from instance folder
app.config.from_pyfile('{0}.cfg'.format(app.name), silent=True)
# 3. Update application config from parameters.
app.config.update(kwargs_config)
# 4. Update config with specified environment variables.
envvars = '{0}_APP_CONFIG_ENVS'.format(app.name.upper())
for cfg_name in app.config.get(envvars, os.getenv(envvars, '')).split(','):
cfg_name = cfg_name.strip().upper()
if cfg_name:
cfg_value = app.config.get(cfg_name)
cfg_value = os.getenv(cfg_name, cfg_value)
try:
cfg_value = ast.literal_eval(cfg_value)
except (SyntaxError, ValueError):
pass
app.config[cfg_name] = cfg_value
app.logger.debug("{0} = {1}".format(cfg_name, cfg_value))
# Ensure SECRET_KEY is set.
SECRET_KEY = app.config.get('SECRET_KEY')
if SECRET_KEY is None:
app.config["SECRET_KEY"] = 'change_me'
warnings.warn(
"Set variable SECRET_KEY with random string in {}".format(
os.path.join(app.instance_path, "{}.cfg".format(app.name)),
), UserWarning)
# Initialize application registry, used for discovery and loading of
# configuration, extensions and blueprints
Registry(app=app)
app.extensions['registry'].update(
# Register packages listed in PACKAGES conf variable.
packages=PackageRegistry(app))
app.extensions['loaded'] = False
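# Illustrative sketch of step 4 above (the app name "myapp" and the variable
# names are hypothetical). With the following environment set:
#
#     export MYAPP_APP_CONFIG_ENVS="PORT,DEBUG"
#     export PORT=5001
#     export DEBUG=True
#
# load_config() coerces the values with ast.literal_eval, ending up with
# app.config['PORT'] == 5001 and app.config['DEBUG'] is True.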
def load_application(app):
"""Load the application.
Assembles the application by use of ``PACKAGES`` and ``EXTENSIONS``
configuration variables.
1. Load extensions by calling ``setup_app()`` in module defined in
``EXTENSIONS``.
2. Register blueprints from each module defined in ``PACKAGES`` by
searching in ``views.py`` for a ``blueprint`` or ``blueprints``
variable.
:param app: Flask application.
"""
# Extend application config with default configuration values from packages
# (app config takes precedence)
app.extensions['registry'].update(
# Register extensions listed in EXTENSIONS conf variable.
extensions=ExtensionRegistry(app),
# Register blueprints from packages in PACKAGES configuration variable.
blueprints=BlueprintAutoDiscoveryRegistry(app=app),
)
ConfigurationRegistry(app)
app.extensions['loaded'] = True
def base_app(app_name, instance_path=None, static_folder=None,
static_url_path='/static/', instance_relative_config=True,
template_folder='templates', flask_cls=Flask):
"""Create a base Flask Application.
Ensures the instance path is set and created. Instance path defaults to
``<sys.prefix>/var/<app name>-instance``.
Additionally configures warnings to be routed to the Python logging system,
and by default makes ``DeprecationWarning`` loud.
.. versionchanged:: v0.2.0
Added ``flask_cls`` parameter.
:param app_name: Flask application name.
:param instance_path: Instance path
:param static_folder: Static folder.
:param static_url_path: URL path of static folder. Default: ``/static/``.
:param instance_relative_config: Use instance relative config
Default: ``True``.
:param template_folder: Template folder. Default: ``templates``.
:param flask_cls: Flask Application class. Default: ``Flask``.
"""
configure_warnings()
# Prefix for env variables
env_prefix = re.sub('[^A-Z]', '', app_name.upper())
# Detect instance path
instance_path = instance_path or \
os.getenv(env_prefix + '_INSTANCE_PATH') or \
os.path.join(sys.prefix, 'var', app_name + '-instance')
# Detect static files path
static_folder = static_folder or \
os.getenv(env_prefix + '_STATIC_FOLDER') or \
os.path.join(instance_path, 'static')
# Create instance path
try:
if not os.path.exists(instance_path):
os.makedirs(instance_path)
except Exception: # pragma: no cover
pass
# Create the Flask application instance
app = flask_cls(
app_name,
static_url_path=static_url_path,
static_folder=static_folder,
instance_relative_config=instance_relative_config,
instance_path=instance_path,
template_folder=template_folder,
)
# Compatibility layer to support Flask 1.0 click integration on v0.10
FlaskCLI(app=app)
return app
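# Illustrative sketch (the app name "myapp" is hypothetical): base_app('myapp')
# creates the Flask instance with instance_path defaulting to
# <sys.prefix>/var/myapp-instance unless MYAPP_INSTANCE_PATH is set, e.g.:
#
#     app = base_app('myapp')
#     print(app.instance_path)  # -> <sys.prefix>/var/myapp-instance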
def appfactory(app_name, module_name, load=True, **kwargs_config):
"""Create a Flask application according to a defined configuration.
:param app_name: Flask application name.
:param module_name: Python configuration module.
:param load: Load application (instead of only the configuration).
Default: ``True``.
:param kwargs_config: Extra configuration variables for the Flask
application.
"""
app = base_app(app_name)
load_config(app, module_name, **kwargs_config)
if load:
load_application(app)
return app | PypiClean |
/Dero-0.15.0-py3-none-any.whl/dero/manager/imports/models/statements/module.py | from typing import List, Dict
import ast
import importlib
from types import ModuleType
from dero.mixins.repr import ReprMixin
from dero.manager.imports.models.statements.importbase import ImportStatement
from dero.manager.imports.models.statements.rename import RenameStatementCollection
from dero.manager.imports.models.statements.comment import Comment
from dero.manager.imports.logic.load.ext_importlib import get_filepath_from_module_str
class ModuleImportStatement(ImportStatement, ReprMixin):
rename_attr = 'modules'
repr_cols = ['modules', 'renames', 'comment']
equal_attrs = ['modules', 'renames', 'comment']
def __init__(self, modules: List[str], renames: RenameStatementCollection = None, comment: Comment=None,
preferred_position: str = None):
if renames is None:
renames = RenameStatementCollection([])
self.modules = modules
self.renames = renames
self.comment = comment
self.preferred_position = preferred_position # sets self.prefer_beginning as bool
def __str__(self):
modules = self._renamed
modules_str = ', '.join(modules)
import_str = f'import {modules_str}'
if self.comment is not None:
import_str += f' {self.comment}'
return import_str
@classmethod
def from_str(cls, import_str: str, renames: RenameStatementCollection = None, comment: Comment=None,
preferred_position: str = None):
from dero.manager.io.file.load.parsers.imp import extract_module_import_from_ast
ast_module = ast.parse(import_str)
cls_obj = extract_module_import_from_ast(ast_module)
cls_obj.comment = comment
cls_obj.preferred_position = preferred_position
return cls_obj
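# Illustrative round trip (module names are arbitrary):
#
#     stmt = ModuleImportStatement.from_str('import os, sys')
#     stmt.modules  # -> ['os', 'sys']
#     str(stmt)     # -> 'import os, sys'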
@classmethod
def from_ast_import(cls, ast_import: ast.Import, preferred_position: str = None):
# Create RenameStatementCollection
renames = RenameStatementCollection.from_ast_import(ast_import)
# Get module original names
modules = [alias.name for alias in ast_import.names]
return cls(
modules,
renames,
preferred_position=preferred_position
)
def get_module_filepaths(self, import_section_path_str: str=None) -> Dict[str, str]:
return {module: get_filepath_from_module_str(module, import_section_path_str) for module in self.modules}
def execute(self, import_section_path_str: str=None) -> List[ModuleType]:
return [importlib.import_module(mod_str, import_section_path_str) for mod_str in self.modules]
@property
def module(self):
if len(self.modules) > 1:
raise ValueError('cannot get one module, import has multiple modules')
return self.modules[0] | PypiClean |
/HC_SR04-0.3.1-py3-none-any.whl/tools/serial.py | from serial import Serial
class _SerialDevice:
def __init__(self, port, baudrate, timeout = 3, open = False):
'''
init method
if open is set True then call open_serial()
'''
self._port = port
self._baudrate = baudrate
self._timeout = timeout
self.serial = None
if open:
self.open_serial()
@property
def port(self):
return self._port
@port.setter
def port(self, port):
'''
port setter method
if serial was opened
- Close serial
- Open serial
'''
self._port = port
if self.serial:
self.close_serial()
self.open_serial()
@property
def baudrate(self):
return self._baudrate
@property
def timeout(self):
return self._timeout
def open_serial(self):
if self.serial:
self.close_serial()
try:
self.serial = Serial(self._port, self._baudrate, timeout=self._timeout)
except Exception:
print('Error : Can not open Serial, Retry!')
return False
return True
def close_serial(self):
if self.serial:
self.serial.close()
self.serial = None
def write(self, message):
if self.serial:
if type(message) is not str:
message = str(message)
self.serial.write(bytes(message.encode()))
def readline(self):
if self.serial:
return self.serial.readline().decode('utf-8').strip()
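# Illustrative usage sketch (the port name is hypothetical):
#
#     dev = _SerialDevice('/dev/ttyUSB0', 9600, open=True)
#     dev.write('ping')
#     print(dev.readline())
#     dev.close_serial()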
def test(self):
'''
Print out status
'''
if self:
if self.serial:
print('Serial is opened')
else:
print('Serial is not opened')
if self.port:
print('Port : %s'%(self.port))
if self._baudrate:
print('Baudrate : %s'%(self._baudrate))
if self._timeout:
print('Timeout : %s'%(self._timeout)) | PypiClean |
/Dickbot.py-1.1-py3-none-any.whl/BotAmino/extensions.py | from time import sleep as slp
from threading import Thread
# from concurrent.futures import ThreadPoolExecutor
from contextlib import suppress
from unicodedata import normalize
from string import punctuation
from amino import objects
from .Bot import Bot
class TimeOut:
users_dict = {}
def time_user(self, uid, end: int = 5):
if uid not in self.users_dict.keys():
self.users_dict[uid] = {"start": 0, "end": end}
Thread(target=self.timer, args=[uid]).start()
def timer(self, uid):
while self.users_dict[uid]["start"] <= self.users_dict[uid]["end"]:
self.users_dict[uid]["start"] += 1
slp(1)
del self.users_dict[uid]
def timed_out(self, uid):
if uid in self.users_dict.keys():
return self.users_dict[uid]["start"] >= self.users_dict[uid]["end"]
return True
class BannedWords:
def filtre_message(self, message, code):
para = normalize('NFD', message).encode(code, 'ignore').decode("utf8").strip().lower()
para = para.translate(str.maketrans("", "", punctuation))
return para
def check_banned_words(self, args):
for word in ("ascii", "utf8"):
with suppress(Exception):
para = self.filtre_message(args.message, word).split()
if para != [""]:
with suppress(Exception):
[args.subClient.delete_message(args.chatId, args.messageId, reason=f"Banned word : {elem}", asStaff=True) for elem in para if elem in args.subClient.banned_words]
class Parameters:
__slots__ = (
"subClient", "chatId", "authorId", "author", "message", "messageId",
"authorIcon", "comId", "replySrc", "replyMsg", "replyId", "info"
)
def __init__(self, data: objects.Event, subClient: Bot):
self.subClient: Bot = subClient
self.chatId: str = data.message.chatId
self.authorId: str = data.message.author.userId
self.author: str = data.message.author.nickname
self.message: str = data.message.content
self.messageId: str = data.message.messageId
self.authorIcon: str = data.message.author.icon
self.comId: str = data.comId
self.replySrc: str = None
self.replyId: str = None
if data.message.extensions and data.message.extensions.get('replyMessage', None) and data.message.extensions['replyMessage'].get('mediaValue', None):
self.replySrc = data.message.extensions['replyMessage']['mediaValue'].replace('_00.', '_hq.')
self.replyId = data.message.extensions['replyMessage']['messageId']
self.replyMsg: str = None
if data.message.extensions and data.message.extensions.get('replyMessage', None) and data.message.extensions['replyMessage'].get('content', None):
self.replyMsg: str = data.message.extensions['replyMessage']['content']
self.replyId: str = data.message.extensions['replyMessage']['messageId']
self.info: objects.Event = data | PypiClean |
/MolGNN_try_2-0.0.1.tar.gz/MolGNN_try_2-0.0.1/README.md | # GNN-based QSAR models
### An example of Graph Neural Networks for QSAR modelling
The base models are adapted
from [this tutorial](https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial7/GNN_overview.html),
part of the [UvA Deep Learning](https://uvadlc.github.io/) course.
The featurisation of molecules as `torch_geometric` objects is taken
from [this blog post](https://www.blopig.com/blog/2022/02/how-to-turn-a-smiles-string-into-a-molecular-graph-for-pytorch-geometric/)
by the Oxford Protein Informatics Group.
# Walkthrough
Here is an example where the models are used to predict the experimental hydration free energy of compounds in
the [FreeSolv](https://github.com/MobleyLab/FreeSolv) dataset.
## Import the necessary modules
```python
import pandas as pd
import torch
from torch_geometric.loader import DataLoader
from sklearn.model_selection import train_test_split
from MolGNN import MolToGraph
from MolGNN.models import GraphClassifier, GraphRegressor
device = 'cuda' if torch.cuda.is_available() else 'cpu'
```
## Load the data
```python
solv = pd.read_csv('MolGNN/test/FreeSolv.tsv', sep=';')
smiles = solv.SMILES.values
train_idx, val_idx = train_test_split(range(len(smiles)), test_size=0.2, random_state=42)
smiles_train, smiles_val = smiles[train_idx], smiles[val_idx]
```
## Train a GNN classifier
We label compounds based on whether the experimental hydration energy is at or above the median,
so as to have an artificially, perfectly balanced dataset.
The performance metric used here is MCC.
```python
y = (
solv['experimental value (kcal/mol)'].values >= solv['experimental value (kcal/mol)'].median()
).astype(int)
y_train, y_val = y[train_idx], y[val_idx]
# Featurize the molecules and build dataloaders
train_loader = DataLoader(
dataset=MolToGraph.create_pyg_data_lst(smiles_train, y_train, device=device),
batch_size=32
)
val_loader = DataLoader(
dataset=MolToGraph.create_pyg_data_lst(smiles_val, y_val, device=device),
batch_size=32
)
# Initialize the model
model = GraphClassifier(
num_classes=1,
c_in=79,
c_hidden=256,
num_layers=3,
dp_rate_linear=0.5,
dp_rate=0.0,
lr=1e-2, weight_decay=0,
device=device
)
# Train the model
val_loss, val_metric = model.fit(train_loader, val_loader, n_epochs=100, log_every_epochs=10)
```
```2022-05-24 17:08:47 INFO Epoch: 100/100 | val loss: 0.374 | val metric: 0.837```
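The walkthrough does not document a separate persistence or inference API for the wrapper classes, so the following is only a sketch; it assumes `GraphClassifier` is (or exposes) a regular `torch.nn.Module`, which is not confirmed here.
```python
# Hypothetical checkpointing sketch -- assumes the wrapper behaves like a torch.nn.Module
torch.save(model.state_dict(), 'gnn_classifier.pt')
# Later: rebuild the model with the same hyperparameters, then restore the weights
model.load_state_dict(torch.load('gnn_classifier.pt'))
model.eval()
```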
## Train a GNN regressor
The performance metric is R2.
```python
y = solv['experimental value (kcal/mol)'].values
y_train, y_val = y[train_idx], y[val_idx]
# Featurize the molecules and build dataloaders
train_loader = DataLoader(
dataset=MolToGraph.create_pyg_data_lst(smiles_train, y_train, device=device),
batch_size=32
)
val_loader = DataLoader(
dataset=MolToGraph.create_pyg_data_lst(smiles_val, y_val, device=device),
batch_size=32
)
# Initialize the model
model = GraphRegressor(
c_in=79,
c_hidden=256,
num_layers=3,
dp_rate_linear=0.5,
dp_rate=0.0,
lr=1e-2, weight_decay=0,
device=device
)
# Train the model
val_loss, val_metric = model.fit(train_loader, val_loader, n_epochs=100, log_every_epochs=10)
```
```2022-05-24 17:06:36 INFO Epoch: 100/100 | val loss: 1.672 | val metric: 0.894``` | PypiClean |
/KratosShallowWaterApplication-9.4-cp311-cp311-win_amd64.whl/KratosMultiphysics/ShallowWaterApplication/coupling/write_from_sw_at_interface_process.py | import KratosMultiphysics as KM
import KratosMultiphysics.ShallowWaterApplication as SW
from KratosMultiphysics.HDF5Application import single_mesh_temporal_output_process
from KratosMultiphysics.ShallowWaterApplication.coupling import depth_integration_output_process as BaseProcess
def Factory(settings, model):
if not isinstance(settings, KM.Parameters):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return WriteFromSwAtInterfaceProcess(model, settings["Parameters"])
class WriteFromSwAtInterfaceProcess(BaseProcess.DepthIntegrationOutputProcess):
"""WriteFromSwAtInterfaceProcess
This process stores the variables of a SW simulation at specific nodes,
used as an interface, and prints them in HDF5 format.
"""
@staticmethod
def GetDefaultParameters():
return KM.Parameters("""{
"volume_model_part_name" : "",
"interface_model_part_name" : "",
"output_model_part_name" : "",
"store_historical_database" : false,
"extrapolate_boundaries" : false,
"print_velocity_profile" : false,
"interval" : [0.0,"End"],
"file_settings" : {},
"output_time_settings" : {}
}""")
def __init__(self, model, settings):
"""The constructor of the WriteFromSwAtInterfaceProcess"""
KM.OutputProcess.__init__(self)
self.settings = settings
self.settings.ValidateAndAssignDefaults(self.GetDefaultParameters())
self.volume_model_part = model[self.settings["volume_model_part_name"].GetString()]
self.interface_model_part = model[self.settings["interface_model_part_name"].GetString()]
self.output_model_part = model.CreateModelPart(self.settings["output_model_part_name"].GetString())
self.interval = KM.IntervalUtility(self.settings)
self.variables = [KM.VELOCITY, KM.MOMENTUM, SW.HEIGHT, SW.VERTICAL_VELOCITY, SW.TOPOGRAPHY]
if self.volume_model_part.ProcessInfo[KM.DOMAIN_SIZE] == 2:
self.integration_process = SW.WriteFromSwAtInterfaceProcess2D(model, self._CreateIntegrationParameters())
else:
self.integration_process = SW.WriteFromSwAtInterfaceProcess3D(model, self._CreateIntegrationParameters())
self.hdf5_process = single_mesh_temporal_output_process.Factory(self._CreateHDF5Parameters(), model)
def _InitializeOutputModelPart(self):
if self.settings["store_historical_database"].GetBool():
self.output_model_part.AddNodalSolutionStepVariable(SW.HEIGHT)
self.output_model_part.AddNodalSolutionStepVariable(KM.MOMENTUM)
self.output_model_part.AddNodalSolutionStepVariable(KM.VELOCITY)
self.output_model_part.AddNodalSolutionStepVariable(SW.VERTICAL_VELOCITY)
self.output_model_part.AddNodalSolutionStepVariable(SW.TOPOGRAPHY)
domain_size = self.volume_model_part.ProcessInfo[KM.DOMAIN_SIZE]
element_name = "Element{}D2N".format(domain_size)
condition_name = "LineCondition{}D2N".format(domain_size)
KM.DuplicateMeshModeler(self.interface_model_part).GenerateMesh(
self.output_model_part, element_name, condition_name)
self.output_model_part.ProcessInfo[KM.DOMAIN_SIZE] = domain_size
def _CreateHDF5Parameters(self):
hdf5_settings = KM.Parameters()
hdf5_settings.AddValue("model_part_name", self.settings["output_model_part_name"])
hdf5_settings.AddValue("file_settings", self.settings["file_settings"])
hdf5_settings.AddValue("output_time_settings", self.settings["output_time_settings"])
data_settings = KM.Parameters("""{"list_of_variables" : ["MOMENTUM","VELOCITY","HEIGHT", "VERTICAL_VELOCITY","TOPOGRAPHY"]}""")
if self.settings["store_historical_database"].GetBool():
hdf5_settings.AddValue("nodal_solution_step_data_settings", data_settings)
else:
hdf5_settings.AddValue("nodal_data_value_settings", data_settings)
hdf5_process_settings = KM.Parameters()
hdf5_process_settings.AddValue("Parameters", hdf5_settings)
return hdf5_process_settings | PypiClean |
/GSimPy-0.0.2-py3-none-any.whl/function/get_datainfo.py | from data.connect_database import *
def get_group_info(database_name, table_name, group_name):
"""
Query item-element information from item table
:param database_name: string
The database to connect to
:param table_name: string
The database table being queried
:param group_name: string
The item for query
:return: query_list
Information in query_list represent GROUP_ID, ITEM_LIST, ITEM_NUM, ELEMENT_LIST, ELEMENT_NUM respectively.
"""
conn = connect_database(database_name)
c = conn.cursor()
sql = ("select * from [{0}] where GROUP_ID = '{1}'".format(table_name, group_name))
c.execute(sql)
query_info = c.fetchall()
query_info = query_info[0]
query_list = list(query_info)
conn.close()
return query_list
# print(get_group_info('sample', 'group', 'I'))
def get_item_info(database_name, table_name, item_name):
"""
Query item-element information from item table
:param database_name: string
The database to connect to
:param table_name: string
The database table being queried
:param item_name: string
The item for query
:return: query_list
Information in query_list represent ITEM_ID, ELEMENT_LIST, ELEMENT_NUM, respectively.
"""
conn = connect_database(database_name)
c = conn.cursor()
sql = ("select * from {0} where ITEM_ID = '{1}'".format(table_name,item_name))
c.execute(sql)
query_info = c.fetchall()
query_info = query_info[0]
query_list = list(query_info)
conn.close()
return query_list
# print(get_item_info('sample','item', 'i1'))
def get_2group_shared_items(database_name, table_name, group1, group2):
"""
Query the shared items information between group1 and group2
:param database_name: string
The database to connect to
:param table_name: string
The item table being queried
:param group1: string
group_id of group1
:param group2: string
group_id of group2
:return: query_list
Information in query_list represent group1,group2,shared_item_list,shared_item_num
respectively.
"""
conn = connect_database(database_name)
c = conn.cursor()
sql = ("select * from [{0}] where GROUP_ID = '{1}' ".format(table_name, group1))
c.execute(sql)
query1_info = c.fetchall()
query1_info = query1_info[0]
query1_1ist = list(query1_info)
sql = ("select * from [{0}] where GROUP_ID = '{1}' ".format(table_name, group2))
c.execute(sql)
query2_info = c.fetchall()
query2_info = query2_info[0]
query2_1ist = list(query2_info)
itemlist1 = query1_1ist[1]
itemlist1 = itemlist1.split(',')
itemset1 = set(itemlist1)
itemlist2 = query2_1ist[1]
itemlist2 = itemlist2.split(',')
itemset2 = set(itemlist2)
shared_itemset = itemset1 & itemset2
shared_itemlist = list(shared_itemset)
num_of_shared_item = len(shared_itemlist)
shared_itemlist = ','.join(shared_itemlist)
query_list = list()
query_list.append(group1)
query_list.append(group2)
query_list.append(shared_itemlist)
query_list.append(num_of_shared_item)
conn.close()
return query_list
# print(get_2group_shared_items('sample', 'group', 'I', 'J'))
def get_2item_shared_elements(database_name, table_name, item1, item2):
"""
Query the shared elements information between item1 and item2
:param database_name: string
The database to connect to
:param table_name: string
The item table being queried
:param item1: string
item_id of item1
:param item2: string
item_id of item2
:return: query_list
Information in query_list represent item1,item2,shared_element_list,shared_element_num
respectively.
"""
conn = connect_database(database_name)
c = conn.cursor()
sql = ("select * from [{0}] where ITEM_ID = '{1}' ".format(table_name, item1))
c.execute(sql)
query1_info = c.fetchall()
query1_info = query1_info[0]
query1_1ist = list(query1_info)
sql = ("select * from [{0}] where ITEM_ID = '{1}' ".format(table_name, item2))
c.execute(sql)
query2_info = c.fetchall()
query2_info = query2_info[0]
query2_1ist = list(query2_info)
elementlist1 = query1_1ist[1]
elementlist1 = elementlist1.split(',')
elementset1 = set(elementlist1)
elementlist2 = query2_1ist[1]
elementlist2 = elementlist2.split(',')
elementset2 = set(elementlist2)
shared_elementset = elementset1 & elementset2
shared_elementlist = list(shared_elementset)
num_of_shared_element = len(shared_elementlist)
shared_elementlist = ','.join(shared_elementlist)
query_list = list()
query_list.append(item1)
query_list.append(item2)
query_list.append(shared_elementlist)
query_list.append(num_of_shared_element)
conn.close()
return query_list
# print(get_2item_shared_elements('sample', 'item', 'i2', 'j2'))
def get_2group_shared_elements(database_name, table_name, group1, group2):
"""
Query the shared elements information between group1 and group2
:param database_name: string
The database to connect to
:param table_name: string
The item table being queried
:param group1: string
group_id of group1
:param group2: string
group_id of group2
:return: query_list
Information in query_list represent group1,group2,shared_element_list,shared_element_num
respectively.
"""
conn = connect_database(database_name)
c = conn.cursor()
sql = ("select * from [{0}] where GROUP_ID = '{1}' ".format(table_name, group1))
c.execute(sql)
query1_info = c.fetchall()
query1_info = query1_info[0]
query1_1ist = list(query1_info)
sql = ("select * from [{0}] where GROUP_ID = '{1}' ".format(table_name, group2))
c.execute(sql)
query2_info = c.fetchall()
query2_info = query2_info[0]
query2_1ist = list(query2_info)
elementlist1 = query1_1ist[3]
elementlist1 = elementlist1.split(',')
elementset1 = set(elementlist1)
elementlist2 = query2_1ist[3]
elementlist2 = elementlist2.split(',')
elementset2 = set(elementlist2)
shared_elementset = elementset1 & elementset2
shared_elementlist = list(shared_elementset)
num_of_shared_element = len(shared_elementlist)
shared_elementlist = ','.join(shared_elementlist)
query_list = list()
query_list.append(group1)
query_list.append(group2)
query_list.append(shared_elementlist)
query_list.append(num_of_shared_element)
conn.close()
return query_list
# print(get_2group_shared_elements('sample', 'group', 'I', 'J'))
def get_all_element_num(database_name, table_name):
"""
Query the number of all distinct elements
:param database_name: string
The database to connect to
:param table_name: string
The database table being queried
:return: all_element_num
The number of all the elements
"""
conn = connect_database(database_name)
c = conn.cursor()
all_element_list = list()
sql = ("select * from [{0}]".format(table_name))
c.execute(sql)
query_list = c.fetchall()
for query_line in query_list:
tuple_list = query_line[1]
tuple_list = tuple_list.split(',')
for gene in tuple_list:
if gene not in all_element_list:
all_element_list.append(gene)
all_element_num = len(all_element_list)
return all_element_num
# print(get_all_element_num('sample','item'))
def get_all_element_list(database_name, table_name):
"""
Query the list of all elements
:param database_name: string
The database to connect to
:param table_name: string
The item table
:return: all_element_list
The list of all elements
"""
conn = connect_database(database_name)
c = conn.cursor()
all_element_list = list()
sql = ("select * from [{0}]".format(table_name))
c.execute(sql)
query_list = c.fetchall()
for query_line in query_list:
tuple_list = query_line[1]
tuple_list = tuple_list.split(',')
for gene in tuple_list:
if gene not in all_element_list:
all_element_list.append(gene)
return all_element_list
# print(get_all_element_list('sample', 'item'))
def get_all_group_num(database_name, table_name):
"""
Query the number of all groups
:param database_name: string
The database to connect to
:param table_name: string
The group table name
:return: all_group_num
The number of all the group
"""
conn = connect_database(database_name)
c = conn.cursor()
all_group_list = list()
sql = ("select * from [{0}]".format(table_name))
c.execute(sql)
query_list = c.fetchall()
for query_line in query_list:
group = query_line[0]
if group not in all_group_list:
all_group_list.append(group)
all_group_num = len(all_group_list)
return all_group_num
def get_all_item_num(database_name, table_name):
"""
Query the number of all items
:param database_name: string
The database to connect to
:param table_name: string
The database table being queried
:return: all_item_num
The number of all the items
"""
conn = connect_database(database_name)
c = conn.cursor()
all_item_list = list()
sql = ("select * from [{0}]".format(table_name))
c.execute(sql)
query_list = c.fetchall()
for query_line in query_list:
mesh = query_line[0]
if mesh not in all_item_list:
all_item_list.append(mesh)
all_item_num = len(all_item_list)
return all_item_num
# print(get_all_item_num('sample','item'))
# print(get_all_group_num('sample','group')) | PypiClean |
/ChatDBG-0.0.15-py3-none-any.whl/chatdbg/chatdbg.py |
from . import pdb
from .pdb import Pdb, Restart, _ModuleTarget, _ScriptTarget
import asyncio
import sys
import traceback
from . import chatdbg_why
class ChatDBG(Pdb):
def do_why(self, arg):
asyncio.run(chatdbg_why.why(self, arg))
import importlib.metadata
_usage = f"""\
usage: chatdbg [-c command] ... [-m module | pyfile] [arg] ...
A Python debugger that uses AI to tell you `why`.
(version {importlib.metadata.metadata('ChatDBG')['Version']})
https://github.com/plasma-umass/ChatDBG
Debug the Python program given by pyfile. Alternatively,
an executable module or package to debug can be specified using
the -m switch.
Initial commands are read from .pdbrc files in your home directory
and in the current directory, if they exist. Commands supplied with
-c are executed after commands from .pdbrc files.
To let the script run until an exception occurs, use "-c continue".
You can then type `why` to get an explanation of the root cause of
the exception, along with a suggested fix. NOTE: you must have an
OpenAI key saved as the environment variable OPENAI_API_KEY.
You can get a key here: https://openai.com/api/
To let the script run up to a given line X in the debugged file, use
"-c 'until X'"."""
def main():
import getopt
opts, args = getopt.getopt(sys.argv[1:], "mhc:", ["help", "command="])
if not args:
print(_usage)
sys.exit(2)
if any(opt in ["-h", "--help"] for opt, optarg in opts):
print(_usage)
sys.exit()
commands = [optarg for opt, optarg in opts if opt in ["-c", "--command"]]
module_indicated = any(opt in ["-m"] for opt, optarg in opts)
cls = _ModuleTarget if module_indicated else _ScriptTarget
target = cls(args[0])
target.check()
sys.argv[:] = args # Hide "pdb.py" and pdb options from argument list
# Note on saving/restoring sys.argv: it's a good idea when sys.argv was
# modified by the script being debugged. It's a bad idea when it was
# changed by the user from the command line. There is a "restart" command
# which allows explicit specification of command line arguments.
pdb = ChatDBG()
pdb.rcLines.extend(commands)
while True:
try:
pdb._run(target)
if pdb._user_requested_quit:
break
print("The program finished and will be restarted")
except Restart:
print("Restarting", target, "with arguments:")
print("\t" + " ".join(sys.argv[1:]))
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
print("The program exited via sys.exit(). Exit status:", end=" ")
print(sys.exc_info()[1])
except SyntaxError:
traceback.print_exc()
sys.exit(1)
except:
traceback.print_exc()
print("Uncaught exception. Entering post mortem debugging")
print("Running 'cont' or 'step' will restart the program")
t = sys.exc_info()[2]
pdb.interaction(None, t)
print("Post mortem debugger finished. The " + target + " will be restarted")
# When invoked as main program, invoke the debugger on a script
if __name__ == "__main__":
import chatdbg
chatdbg.main() | PypiClean |
/Flask-Security-Elucidata-1.0.0.tar.gz/Flask-Security-Elucidata-1.0.0/docs/api.rst | API
===
Core
----
.. autoclass:: flask_security.core.Security
:members:
.. data:: flask_security.core.current_user
A proxy for the current user.
Protecting Views
----------------
.. autofunction:: flask_security.decorators.login_required
.. autofunction:: flask_security.decorators.roles_required
.. autofunction:: flask_security.decorators.roles_accepted
.. autofunction:: flask_security.decorators.http_auth_required
.. autofunction:: flask_security.decorators.auth_token_required
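For example, a view might combine these decorators as follows (the route and
role name are illustrative, and ``app`` is assumed to be your Flask application)::
    from flask_security.decorators import login_required, roles_required
    @app.route('/admin')
    @login_required
    @roles_required('admin')
    def admin_dashboard():
        return 'admins only'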
User Object Helpers
-------------------
.. autoclass:: flask_security.core.UserMixin
:members:
.. autoclass:: flask_security.core.RoleMixin
:members:
.. autoclass:: flask_security.core.AnonymousUser
:members:
Datastores
----------
.. autoclass:: flask_security.datastore.UserDatastore
:members:
.. autoclass:: flask_security.datastore.SQLAlchemyUserDatastore
:members:
:inherited-members:
.. autoclass:: flask_security.datastore.MongoEngineUserDatastore
:members:
:inherited-members:
.. autoclass:: flask_security.datastore.PeeweeUserDatastore
:members:
:inherited-members:
.. autoclass:: flask_security.datastore.PonyUserDatastore
:members:
:inherited-members:
Utils
-----
.. autofunction:: flask_security.utils.login_user
.. autofunction:: flask_security.utils.logout_user
.. autofunction:: flask_security.utils.get_hmac
.. autofunction:: flask_security.utils.verify_password
.. autofunction:: flask_security.utils.verify_and_update_password
.. autofunction:: flask_security.utils.encrypt_password
.. autofunction:: flask_security.utils.hash_password
.. autofunction:: flask_security.utils.url_for_security
.. autofunction:: flask_security.utils.get_within_delta
.. autofunction:: flask_security.utils.send_mail
.. autofunction:: flask_security.utils.get_token_status
Signals
-------
See the `Flask documentation on signals`_ for information on how to use these
signals in your code.
See the documentation for the signals provided by the Flask-Login and
Flask-Principal extensions. In addition to those signals, Flask-Security
sends the following signals.
.. data:: user_registered
Sent when a user registers on the site. In addition to the app (which is the
sender), it is passed `user` and `confirm_token` arguments.
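For example, a handler for this signal might look like this (the handler body is
illustrative, and ``app`` is assumed to be your Flask application)::
    from flask_security.signals import user_registered
    @user_registered.connect_via(app)
    def on_user_registered(app, user, confirm_token, **extra):
        app.logger.info('New user registered: %s', user.email)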
.. data:: user_confirmed
Sent when a user is confirmed. In addition to the app (which is the
sender), it is passed a `user` argument.
.. data:: confirm_instructions_sent
Sent when a user requests confirmation instructions. In addition to the app
(which is the sender), it is passed a `user` argument.
.. data:: login_instructions_sent
Sent when passwordless login is used and user logs in. In addition to the app
(which is the sender), it is passed `user` and `login_token` arguments.
.. data:: password_reset
Sent when a user completes a password reset. In addition to the app (which is
the sender), it is passed a `user` argument.
.. data:: password_changed
Sent when a user completes a password change. In addition to the app (which is
the sender), it is passed a `user` argument.
.. data:: reset_password_instructions_sent
Sent when a user requests a password reset. In addition to the app (which is
the sender), it is passed `user` and `token` arguments.
.. _Flask documentation on signals: http://flask.pocoo.org/docs/signals/
| PypiClean |
/Icotest_DeviceServer-1.0.3-py3-none-any.whl/DeviceServer/api_client.py | from __future__ import absolute_import
import atexit
import datetime
from dateutil.parser import parse
import json
import mimetypes
from multiprocessing.pool import ThreadPool
import os
import re
import tempfile
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import quote
from DeviceServer.configuration import Configuration
import DeviceServer.models
from DeviceServer import rest
from DeviceServer.exceptions import ApiValueError, ApiException
class ApiClient(object):
"""Generic API client for OpenAPI client library builds.
OpenAPI generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the OpenAPI
templates.
NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param configuration: .Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long, # noqa: F821
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
_pool = None
def __init__(self, configuration=None, header_name=None, header_value=None,
cookie=None, pool_threads=1):
if configuration is None:
configuration = Configuration.get_default_copy()
self.configuration = configuration
self.pool_threads = pool_threads
self.rest_client = rest.RESTClientObject(configuration)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'OpenAPI-Generator/1.0.3/python'
self.client_side_validation = configuration.client_side_validation
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._pool:
self._pool.close()
self._pool.join()
self._pool = None
if hasattr(atexit, 'unregister'):
atexit.unregister(self.close)
@property
def pool(self):
"""Create thread pool on first request
avoids instantiating unused threadpool for blocking clients.
"""
if self._pool is None:
atexit.register(self.close)
self._pool = ThreadPool(self.pool_threads)
return self._pool
@property
def user_agent(self):
"""User agent for this API client"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(
self, resource_path, method, path_params=None,
query_params=None, header_params=None, body=None, post_params=None,
files=None, response_types_map=None, auth_settings=None,
_return_http_data_only=None, collection_formats=None,
_preload_content=True, _request_timeout=None, _host=None,
_request_auth=None):
config = self.configuration
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
resource_path = resource_path.replace(
'{%s}' % k,
quote(str(v), safe=config.safe_chars_for_path_param)
)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = post_params if post_params else []
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
post_params.extend(self.files_parameters(files))
# auth setting
self.update_params_for_auth(
header_params, query_params, auth_settings,
request_auth=_request_auth)
# body
if body:
body = self.sanitize_for_serialization(body)
# request url
if _host is None:
url = self.configuration.host + resource_path
else:
# use server/host defined in path or operation instead
url = _host + resource_path
try:
# perform request and return response
response_data = self.request(
method, url, query_params=query_params, headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
except ApiException as e:
e.body = e.body.decode('utf-8') if six.PY3 else e.body
raise e
self.last_response = response_data
return_data = response_data
if not _preload_content:
return return_data
response_type = response_types_map.get(response_data.status, None)
if six.PY3 and response_type not in ["file", "bytes"]:
match = None
content_type = response_data.getheader('content-type')
if content_type is not None:
match = re.search(r"charset=([a-zA-Z\-\d]+)[\s\;]?", content_type)
encoding = match.group(1) if match else "utf-8"
response_data.data = response_data.data.decode(encoding)
# deserialize response data
if response_type:
return_data = self.deserialize(response_data, response_type)
else:
return_data = None
if _return_http_data_only:
return (return_data)
else:
return (return_data, response_data.status,
response_data.getheaders())
def sanitize_for_serialization(self, obj):
"""Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is OpenAPI model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if obj is None:
return None
elif isinstance(obj, self.PRIMITIVE_TYPES):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(self.sanitize_for_serialization(sub_obj)
for sub_obj in obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
# Convert model obj to dict except
# attributes `openapi_types`, `attribute_map`
# and attributes which value is not None.
# Convert attribute name to json key in
# model definition for request.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in six.iteritems(obj.openapi_types)
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in six.iteritems(obj_dict)}
def deserialize(self, response, response_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == "file":
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if type(klass) == str:
if klass.startswith('list['):
sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict['):
sub_kls = re.match(r'dict\[([^,]*), (.*)\]', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in six.iteritems(data)}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
else:
klass = getattr(DeviceServer.models, klass)
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == datetime.date:
return self.__deserialize_date(data)
elif klass == datetime.datetime:
return self.__deserialize_datetime(data)
else:
return self.__deserialize_model(data, klass)
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_types_map=None, auth_settings=None,
async_req=None, _return_http_data_only=None,
collection_formats=None, _preload_content=True,
_request_timeout=None, _host=None, _request_auth=None):
"""Makes the HTTP request (synchronous) and returns deserialized data.
To make an async_req request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_token: dict, optional
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_types_map, auth_settings,
_return_http_data_only, collection_formats,
_preload_content, _request_timeout, _host,
_request_auth)
return self.pool.apply_async(self.__call_api, (resource_path,
method, path_params,
query_params,
header_params, body,
post_params, files,
response_types_map,
auth_settings,
_return_http_data_only,
collection_formats,
_preload_content,
_request_timeout,
_host, _request_auth))
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ApiValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
def files_parameters(self, files=None):
"""Builds form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if files:
for k, v in six.iteritems(files):
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([k, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types, method=None, body=None):
"""Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:param method: http method (e.g. POST, PATCH).
:param body: http body to send.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return None
content_types = [x.lower() for x in content_types]
if (method == 'PATCH' and
'application/json-patch+json' in content_types and
isinstance(body, list)):
return 'application/json-patch+json'
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, queries, auth_settings,
request_auth=None):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param queries: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
:param request_auth: if set, the provided settings will
override the token in the configuration.
"""
if not auth_settings:
return
if request_auth:
self._apply_auth_params(headers, queries, request_auth)
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
self._apply_auth_params(headers, queries, auth_setting)
def _apply_auth_params(self, headers, queries, auth_setting):
"""Updates the request parameters based on a single auth_setting
:param headers: Header parameters dict to be updated.
:param queries: Query parameters tuple list to be updated.
:param auth_setting: auth settings for the endpoint
"""
if auth_setting['in'] == 'cookie':
headers['Cookie'] = auth_setting['value']
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
queries.append((auth_setting['key'], auth_setting['value']))
else:
raise ApiValueError(
'Authentication token must be in `query` or `header`'
)
def __deserialize_file(self, response):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
f.write(response.data)
return path
def __deserialize_primitive(self, data, klass):
"""Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, long, float, str, bool.
"""
try:
return klass(data)
except UnicodeEncodeError:
return six.text_type(data)
except TypeError:
return data
def __deserialize_object(self, value):
"""Return an original value.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""Deserializes string to date.
:param string: str.
:return: date.
"""
try:
return parse(string).date()
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason="Failed to parse `{0}` as date object".format(string)
)
def __deserialize_datetime(self, string):
"""Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
return parse(string)
except ImportError:
return string
except ValueError:
raise rest.ApiException(
status=0,
reason=(
"Failed to parse `{0}` as datetime object"
.format(string)
)
)
def __deserialize_model(self, data, klass):
"""Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
has_discriminator = False
if (hasattr(klass, 'get_real_child_model')
and klass.discriminator_value_class_map):
has_discriminator = True
if not klass.openapi_types and has_discriminator is False:
return data
kwargs = {}
if (data is not None and
klass.openapi_types is not None and
isinstance(data, (list, dict))):
for attr, attr_type in six.iteritems(klass.openapi_types):
if klass.attribute_map[attr] in data:
value = data[klass.attribute_map[attr]]
kwargs[attr] = self.__deserialize(value, attr_type)
kwargs["local_vars_configuration"] = self.configuration
instance = klass(**kwargs)
if has_discriminator:
klass_name = instance.get_real_child_model(data)
if klass_name:
instance = self.__deserialize(data, klass_name)
return instance | PypiClean |
/FlexGet-3.9.6-py3-none-any.whl/flexget/components/imdb/imdb.py | from loguru import logger
from flexget import plugin
from flexget.event import event
from flexget.utils.log import log_once
logger = logger.bind(name='imdb')
class FilterImdb:
"""
This plugin allows filtering based on IMDB score, votes and genres etc.
Note: All parameters are optional. Some are mutually exclusive.
Configuration::
min_score: <num>
min_votes: <num>
min_meta_score: <num>
min_year: <num>
max_year: <num>
# accept movies with any of these genres
accept_genres:
- genre1
- genre2
# reject if genre contains any of these
reject_genres:
- genre1
- genre2
# reject if language contain any of these
reject_languages:
- language1
# accept only these primary languages
accept_languages:
- language1
# accept movies with any of these actors
accept_actors:
- nm0004695
- nm0004754
# reject movie if it has any of these actors
reject_actors:
- nm0001191
- nm0002071
# accept all movies by these directors
accept_directors:
- nm0000318
# reject movies by these directors
reject_directors:
- nm0093051
# accept all movies by these writers
accept_writers:
- nm0000318
# reject movies by these writers
reject_writers:
- nm0093051
# reject movies/TV shows with any of these ratings
reject_mpaa_ratings:
- PG_13
- R
- X
# accept movies/TV shows with only these ratings
accept_mpaa_ratings:
- PG
- G
- TV_Y
"""
schema = {
'type': 'object',
'properties': {
'min_year': {'type': 'integer'},
'max_year': {'type': 'integer'},
'min_votes': {'type': 'integer'},
'min_meta_score': {'type': 'integer'},
'min_score': {'type': 'number'},
'accept_genres': {'type': 'array', 'items': {'type': 'string'}},
'reject_genres': {'type': 'array', 'items': {'type': 'string'}},
'reject_languages': {'type': 'array', 'items': {'type': 'string'}},
'accept_languages': {'type': 'array', 'items': {'type': 'string'}},
'reject_actors': {'type': 'array', 'items': {'type': 'string'}},
'accept_actors': {'type': 'array', 'items': {'type': 'string'}},
'reject_directors': {'type': 'array', 'items': {'type': 'string'}},
'accept_directors': {'type': 'array', 'items': {'type': 'string'}},
'reject_writers': {'type': 'array', 'items': {'type': 'string'}},
'accept_writers': {'type': 'array', 'items': {'type': 'string'}},
'reject_mpaa_ratings': {'type': 'array', 'items': {'type': 'string'}},
'accept_mpaa_ratings': {'type': 'array', 'items': {'type': 'string'}},
},
'additionalProperties': False,
}
# Run later to avoid unnecessary lookups
@plugin.priority(120)
def on_task_filter(self, task, config):
lookup = plugin.get('imdb_lookup', self).lookup
# since the plugin does not reject anything, no sense going through accepted
for entry in task.undecided:
force_accept = False
try:
lookup(entry)
except plugin.PluginError as e:
# logs skip message once through log_once (info) and then only when run from cmd line (w/o --cron)
log_once(
'Skipping {} because of an error: {}'.format(entry['title'], e.value),
logger=logger,
)
continue
# for key, value in entry.iteritems():
# log.debug('%s = %s (type: %s)' % (key, value, type(value)))
# Check defined conditions, TODO: rewrite into functions?
reasons = []
if 'min_score' in config:
if entry.get('imdb_score', 0) < config['min_score']:
reasons.append(
'min_score ({} < {})'.format(entry.get('imdb_score'), config['min_score'])
)
if 'min_votes' in config:
if entry.get('imdb_votes', 0) < config['min_votes']:
reasons.append(
'min_votes ({} < {})'.format(entry.get('imdb_votes'), config['min_votes'])
)
if 'min_meta_score' in config:
if entry.get('imdb_meta_score', 0) < config['min_meta_score']:
reasons.append(
'min_meta_score ({} < {})'.format(
entry.get('imdb_meta_score'), config['min_meta_score']
)
)
if 'min_year' in config:
if entry.get('imdb_year', 0) < config['min_year']:
reasons.append(
'min_year ({} < {})'.format(entry.get('imdb_year'), config['min_year'])
)
if 'max_year' in config:
if entry.get('imdb_year', 0) > config['max_year']:
reasons.append(
'max_year ({} > {})'.format(entry.get('imdb_year'), config['max_year'])
)
if 'accept_genres' in config:
accepted = config['accept_genres']
accept_genre = False
for genre in entry.get('imdb_genres', []):
if genre in accepted:
accept_genre = True
break
                if not accept_genre:
reasons.append('accept_genres')
if 'reject_genres' in config:
rejected = config['reject_genres']
for genre in entry.get('imdb_genres', []):
if genre in rejected:
reasons.append('reject_genres')
break
if 'reject_languages' in config:
rejected = config['reject_languages']
for language in entry.get('imdb_languages', []):
if language in rejected:
reasons.append('reject_languages')
break
if 'accept_languages' in config:
accepted = config['accept_languages']
if entry.get('imdb_languages') and entry['imdb_languages'][0] not in accepted:
# Reject if the first (primary) language is not among acceptable languages
reasons.append('accept_languages')
if 'reject_actors' in config:
rejected = config['reject_actors']
for actor_id, actor_name in entry.get('imdb_actors', {}).items():
if actor_id in rejected or actor_name in rejected:
                        reasons.append('reject_actors %s' % (actor_name or actor_id))
break
# Accept if actors contains an accepted actor, but don't reject otherwise
if 'accept_actors' in config:
accepted = config['accept_actors']
for actor_id, actor_name in entry.get('imdb_actors', {}).items():
if actor_id in accepted or actor_name in accepted:
logger.debug(
'Accepting because of accept_actors {}', actor_name or actor_id
)
force_accept = True
break
if 'reject_directors' in config:
rejected = config['reject_directors']
for director_id, director_name in entry.get('imdb_directors', {}).items():
if director_id in rejected or director_name in rejected:
                        reasons.append('reject_directors %s' % (director_name or director_id))
break
# Accept if the director is in the accept list, but do not reject if the director is unknown
if 'accept_directors' in config:
accepted = config['accept_directors']
for director_id, director_name in entry.get('imdb_directors', {}).items():
if director_id in accepted or director_name in accepted:
logger.debug(
'Accepting because of accept_directors {}',
director_name or director_id,
)
force_accept = True
break
if 'reject_writers' in config:
rejected = config['reject_writers']
for writer_id, writer_name in entry.get('imdb_writers', {}).items():
if writer_id in rejected or writer_name in rejected:
                        reasons.append('reject_writers %s' % (writer_name or writer_id))
break
# Accept if the writer is in the accept list, but do not reject if the writer is unknown
if 'accept_writers' in config:
accepted = config['accept_writers']
for writer_id, writer_name in entry.get('imdb_writers', {}).items():
if writer_id in accepted or writer_name in accepted:
logger.debug(
'Accepting because of accept_writers {}', writer_name or writer_id
)
force_accept = True
break
if 'reject_mpaa_ratings' in config:
rejected = config['reject_mpaa_ratings']
if entry.get('imdb_mpaa_rating') in rejected:
reasons.append('reject_mpaa_ratings %s' % entry['imdb_mpaa_rating'])
if 'accept_mpaa_ratings' in config:
accepted = config['accept_mpaa_ratings']
if entry.get('imdb_mpaa_rating') not in accepted:
reasons.append('accept_mpaa_ratings %s' % entry.get('imdb_mpaa_rating'))
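            # If any rule matched and nothing forced acceptance, leave the entry undecided and log the reasons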
if reasons and not force_accept:
msg = 'Didn\'t accept `{}` because of rule(s) {}'.format(
entry.get('imdb_name', None) or entry['title'],
', '.join(reasons),
)
if task.options.debug:
logger.debug(msg)
else:
if task.options.cron:
log_once(msg, logger)
else:
logger.info(msg)
else:
logger.debug('Accepting {}', entry['title'])
entry.accept()
@event('plugin.register')
def register_plugin():
plugin.register(FilterImdb, 'imdb', api_ver=2) | PypiClean |
/LigBinder-0.1.4-py3-none-any.whl/ligbinder/md.py | import os
from typing import List, Optional
import subprocess
import logging
from ligbinder.settings import SETTINGS
logger = logging.getLogger(__name__)
class AmberMDEngine:
def __init__(
self,
path,
crd_file: Optional[str] = None,
top_file: Optional[str] = None,
trj_file: Optional[str] = None,
rst_file: Optional[str] = None,
log_file: Optional[str] = None,
ref_file: Optional[str] = None,
inp_file: Optional[str] = None,
steps=250000,
tstep=4.0,
use_gpu=True,
use_hmr=True,
apply_restraints=True,
restraint_force=1.0
) -> None:
self.crd_file = os.path.join(
path, crd_file if crd_file is not None else SETTINGS["md"]["crd_file"]
)
self.trj_file = os.path.join(
path, trj_file if trj_file is not None else SETTINGS["md"]["trj_file"]
)
self.top_file = os.path.join(
path, top_file if top_file is not None else SETTINGS["md"]["top_file"]
)
self.rst_file = os.path.join(
path, rst_file if rst_file is not None else SETTINGS["md"]["rst_file"]
)
self.log_file = os.path.join(
path, log_file if log_file is not None else SETTINGS["md"]["log_file"]
)
self.ref_file = os.path.join(
path, ref_file if ref_file is not None else SETTINGS["md"]["ref_file"]
)
self.inp_file = os.path.join(
path, inp_file if inp_file is not None else SETTINGS["md"]["inp_file"]
)
self.steps = steps
self.tstep = tstep
self.use_gpu = use_gpu
self.binary = "pmemd.cuda" if self.use_gpu else "sander"
self.apply_restraints = apply_restraints
self.restraint_force = restraint_force
self.use_hmr = use_hmr
def write_input(self):
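        # Build the Amber MD input file; energy/trajectory output is written at 10 evenly spaced intervals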
interval = self.steps // 10
restraints = f'restraint_wt={self.restraint_force}, restraintmask=\'{SETTINGS["system"]["restraint_mask"]}\','
restraints = restraints if self.apply_restraints else ""
lines = [
"# Constant Volume",
"&cntrl",
"ntx=1, irest=0, iwrap=1,",
f"ntxo=2, ntpr={interval}, ntwx={interval}, ntwv=0, ntwe=0, ioutfm=1,",
f"nstlim={self.steps}, dt={self.tstep/1000},",
"ntc=2, ntf=2,",
"ntb=1, cut=9.0,",
"ntt=3, gamma_ln=4.0, ig=-1,",
"temp0=300,",
restraints,
"&end",
""
]
lines = [line for line in lines if line is not None]
msg = "\n".join(lines)
with open(self.inp_file, "w") as file:
file.write(msg)
def run(self):
self.write_input()
cmd = self._get_command()
logger.info('Running md engine')
logger.debug(f'{" ".join(cmd)}')
return subprocess.run(self._get_command(), check=True)
def _get_command(self) -> List[str]:
command = [
self.binary,
"-O",
"-i", f"{self.inp_file}",
"-o", f"{self.log_file}",
"-p", f"{self.top_file}",
"-c", f"{self.crd_file}",
"-x", f"{self.trj_file}",
"-r", f"{self.rst_file}"
]
command += ["-ref", f"{self.ref_file}"] if self.apply_restraints else []
return command | PypiClean |
/Kivy-2.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl/kivy/uix/carousel.py | __all__ = ('Carousel', )
from functools import partial
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.animation import Animation
from kivy.uix.stencilview import StencilView
from kivy.uix.relativelayout import RelativeLayout
from kivy.properties import BooleanProperty, OptionProperty, AliasProperty, \
NumericProperty, ListProperty, ObjectProperty, StringProperty
class Carousel(StencilView):
'''Carousel class. See module documentation for more information.
'''
slides = ListProperty([])
'''List of slides inside the Carousel. The slides are the
widgets added to the Carousel using the :attr:`add_widget` method.
:attr:`slides` is a :class:`~kivy.properties.ListProperty` and is
read-only.
'''
def _get_slides_container(self):
return [x.parent for x in self.slides]
slides_container = AliasProperty(_get_slides_container, bind=('slides',))
direction = OptionProperty('right',
options=('right', 'left', 'top', 'bottom'))
'''Specifies the direction in which the slides are ordered. This
corresponds to the direction from which the user swipes to go from one
slide to the next. It
can be `right`, `left`, `top`, or `bottom`. For example, with
the default value of `right`, the second slide is to the right
of the first and the user would swipe from the right towards the
left to get to the second slide.
:attr:`direction` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'right'.
'''
min_move = NumericProperty(0.2)
'''Defines the minimum distance to be covered before the touch is
considered a swipe gesture and the Carousel content changed.
    This is expressed as a fraction of the Carousel's width.
If the movement doesn't reach this minimum value, the movement is
cancelled and the content is restored to its original position.
:attr:`min_move` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.2.
'''
anim_move_duration = NumericProperty(0.5)
'''Defines the duration of the Carousel animation between pages.
:attr:`anim_move_duration` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.5.
'''
anim_cancel_duration = NumericProperty(0.3)
'''Defines the duration of the animation when a swipe movement is not
accepted. This is generally when the user does not make a large enough
swipe. See :attr:`min_move`.
:attr:`anim_cancel_duration` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.3.
'''
loop = BooleanProperty(False)
'''Allow the Carousel to loop infinitely. If True, when the user tries to
    swipe beyond the last page, it will return to the first. If False, it will
remain on the last page.
:attr:`loop` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
def _get_index(self):
if self.slides:
return self._index % len(self.slides)
return None
def _set_index(self, value):
if self.slides:
self._index = value % len(self.slides)
else:
self._index = None
index = AliasProperty(_get_index, _set_index,
bind=('_index', 'slides'),
cache=True)
'''Get/Set the current slide based on the index.
:attr:`index` is an :class:`~kivy.properties.AliasProperty` and defaults
to 0 (the first item).
'''
def _prev_slide(self):
slides = self.slides
len_slides = len(slides)
index = self.index
if len_slides < 2: # None, or 1 slide
return None
if self.loop and index == 0:
return slides[-1]
if index > 0:
return slides[index - 1]
previous_slide = AliasProperty(_prev_slide,
bind=('slides', 'index', 'loop'),
cache=True)
'''The previous slide in the Carousel. It is None if the current slide is
the first slide in the Carousel. This ordering reflects the order in which
the slides are added: their presentation varies according to the
:attr:`direction` property.
:attr:`previous_slide` is an :class:`~kivy.properties.AliasProperty`.
.. versionchanged:: 1.5.0
This property no longer exposes the slides container. It returns
the widget you have added.
'''
def _curr_slide(self):
if len(self.slides):
return self.slides[self.index or 0]
current_slide = AliasProperty(_curr_slide,
bind=('slides', 'index'),
cache=True)
'''The currently shown slide.
:attr:`current_slide` is an :class:`~kivy.properties.AliasProperty`.
.. versionchanged:: 1.5.0
The property no longer exposes the slides container. It returns
the widget you have added.
'''
def _next_slide(self):
if len(self.slides) < 2: # None, or 1 slide
return None
if self.loop and self.index == len(self.slides) - 1:
return self.slides[0]
if self.index < len(self.slides) - 1:
return self.slides[self.index + 1]
next_slide = AliasProperty(_next_slide,
bind=('slides', 'index', 'loop'),
cache=True)
'''The next slide in the Carousel. It is None if the current slide is
the last slide in the Carousel. This ordering reflects the order in which
the slides are added: their presentation varies according to the
:attr:`direction` property.
:attr:`next_slide` is an :class:`~kivy.properties.AliasProperty`.
.. versionchanged:: 1.5.0
The property no longer exposes the slides container.
It returns the widget you have added.
'''
scroll_timeout = NumericProperty(200)
'''Timeout allowed to trigger the :attr:`scroll_distance`, in milliseconds.
If the user has not moved :attr:`scroll_distance` within the timeout,
no scrolling will occur and the touch event will go to the children.
:attr:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty` and
defaults to 200 (milliseconds)
.. versionadded:: 1.5.0
'''
scroll_distance = NumericProperty('20dp')
'''Distance to move before scrolling the :class:`Carousel` in pixels. As
soon as the distance has been traveled, the :class:`Carousel` will start
to scroll, and no touch event will go to children.
It is advisable that you base this value on the dpi of your target device's
screen.
:attr:`scroll_distance` is a :class:`~kivy.properties.NumericProperty` and
defaults to 20dp.
.. versionadded:: 1.5.0
'''
anim_type = StringProperty('out_quad')
'''Type of animation to use while animating to the next/previous slide.
This should be the name of an
:class:`~kivy.animation.AnimationTransition` function.
:attr:`anim_type` is a :class:`~kivy.properties.StringProperty` and
defaults to 'out_quad'.
.. versionadded:: 1.8.0
'''
ignore_perpendicular_swipes = BooleanProperty(False)
'''Ignore swipes on axis perpendicular to direction.
:attr:`ignore_perpendicular_swipes` is a
:class:`~kivy.properties.BooleanProperty` and defaults to False.
.. versionadded:: 1.10.0
'''
# private properties, for internal use only ###
_index = NumericProperty(0, allownone=True)
_prev = ObjectProperty(None, allownone=True)
_current = ObjectProperty(None, allownone=True)
_next = ObjectProperty(None, allownone=True)
_offset = NumericProperty(0)
_touch = ObjectProperty(None, allownone=True)
_change_touch_mode_ev = None
def __init__(self, **kwargs):
self._trigger_position_visible_slides = Clock.create_trigger(
self._position_visible_slides, -1)
super(Carousel, self).__init__(**kwargs)
self._skip_slide = None
self.touch_mode_change = False
self._prioritize_next = False
self.fbind('loop', lambda *args: self._insert_visible_slides())
def load_slide(self, slide):
'''Animate to the slide that is passed as the argument.
.. versionchanged:: 1.8.0
'''
slides = self.slides
start, stop = slides.index(self.current_slide), slides.index(slide)
if start == stop:
return
self._skip_slide = stop
if stop > start:
self._prioritize_next = True
self._insert_visible_slides(_next_slide=slide)
self.load_next()
else:
self._prioritize_next = False
self._insert_visible_slides(_prev_slide=slide)
self.load_previous()
def load_previous(self):
'''Animate to the previous slide.
.. versionadded:: 1.7.0
'''
self.load_next(mode='prev')
def load_next(self, mode='next'):
'''Animate to the next slide.
.. versionadded:: 1.7.0
'''
if self.index is not None:
w, h = self.size
_direction = {
'top': -h / 2,
'bottom': h / 2,
'left': w / 2,
'right': -w / 2}
_offset = _direction[self.direction]
if mode == 'prev':
_offset = -_offset
self._start_animation(min_move=0, offset=_offset)
def get_slide_container(self, slide):
return slide.parent
@property
def _prev_equals_next(self):
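        # With loop enabled and exactly two slides, the previous and next slides are the same widget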
return self.loop and len(self.slides) == 2
def _insert_visible_slides(self, _next_slide=None, _prev_slide=None):
get_slide_container = self.get_slide_container
previous_slide = _prev_slide if _prev_slide else self.previous_slide
if previous_slide:
self._prev = get_slide_container(previous_slide)
else:
self._prev = None
current_slide = self.current_slide
if current_slide:
self._current = get_slide_container(current_slide)
else:
self._current = None
next_slide = _next_slide if _next_slide else self.next_slide
if next_slide:
self._next = get_slide_container(next_slide)
else:
self._next = None
if self._prev_equals_next:
setattr(self, '_prev' if self._prioritize_next else '_next', None)
super_remove = super(Carousel, self).remove_widget
for container in self.slides_container:
super_remove(container)
if self._prev and self._prev.parent is not self:
super(Carousel, self).add_widget(self._prev)
if self._next and self._next.parent is not self:
super(Carousel, self).add_widget(self._next)
if self._current:
super(Carousel, self).add_widget(self._current)
def _position_visible_slides(self, *args):
slides, index = self.slides, self.index
no_of_slides = len(slides) - 1
if not slides:
return
x, y, width, height = self.x, self.y, self.width, self.height
_offset, direction = self._offset, self.direction[0]
_prev, _next, _current = self._prev, self._next, self._current
get_slide_container = self.get_slide_container
last_slide = get_slide_container(slides[-1])
first_slide = get_slide_container(slides[0])
skip_next = False
_loop = self.loop
if direction in 'rl':
xoff = x + _offset
x_prev = {'l': xoff + width, 'r': xoff - width}
x_next = {'l': xoff - width, 'r': xoff + width}
if _prev:
_prev.pos = (x_prev[direction], y)
elif _loop and _next and index == 0:
# if first slide is moving to right with direction set to right
# or toward left with direction set to left
if ((_offset > 0 and direction == 'r') or
(_offset < 0 and direction == 'l')):
# put last_slide before first slide
last_slide.pos = (x_prev[direction], y)
skip_next = True
if _current:
_current.pos = (xoff, y)
if skip_next:
return
if _next:
_next.pos = (x_next[direction], y)
elif _loop and _prev and index == no_of_slides:
if ((_offset < 0 and direction == 'r') or
(_offset > 0 and direction == 'l')):
first_slide.pos = (x_next[direction], y)
if direction in 'tb':
yoff = y + _offset
y_prev = {'t': yoff - height, 'b': yoff + height}
y_next = {'t': yoff + height, 'b': yoff - height}
if _prev:
_prev.pos = (x, y_prev[direction])
elif _loop and _next and index == 0:
if ((_offset > 0 and direction == 't') or
(_offset < 0 and direction == 'b')):
last_slide.pos = (x, y_prev[direction])
skip_next = True
if _current:
_current.pos = (x, yoff)
if skip_next:
return
if _next:
_next.pos = (x, y_next[direction])
elif _loop and _prev and index == no_of_slides:
if ((_offset < 0 and direction == 't') or
(_offset > 0 and direction == 'b')):
first_slide.pos = (x, y_next[direction])
def on_size(self, *args):
size = self.size
for slide in self.slides_container:
slide.size = size
self._trigger_position_visible_slides()
def on_pos(self, *args):
self._trigger_position_visible_slides()
def on_index(self, *args):
self._insert_visible_slides()
self._trigger_position_visible_slides()
self._offset = 0
def on_slides(self, *args):
if self.slides:
self.index = self.index % len(self.slides)
self._insert_visible_slides()
self._trigger_position_visible_slides()
def on__offset(self, *args):
self._trigger_position_visible_slides()
# if reached full offset, switch index to next or prev
direction = self.direction[0]
_offset = self._offset
width = self.width
height = self.height
index = self.index
if self._skip_slide is not None or index is None:
return
# Move to next slide?
if (direction == 'r' and _offset <= -width) or \
(direction == 'l' and _offset >= width) or \
(direction == 't' and _offset <= - height) or \
(direction == 'b' and _offset >= height):
if self.next_slide:
self.index += 1
# Move to previous slide?
elif (direction == 'r' and _offset >= width) or \
(direction == 'l' and _offset <= -width) or \
(direction == 't' and _offset >= height) or \
(direction == 'b' and _offset <= -height):
if self.previous_slide:
self.index -= 1
elif self._prev_equals_next:
new_value = (_offset < 0) is (direction in 'rt')
if self._prioritize_next is not new_value:
self._prioritize_next = new_value
if new_value is (self._next is None):
self._prev, self._next = self._next, self._prev
def _start_animation(self, *args, **kwargs):
# compute target offset for ease back, next or prev
new_offset = 0
direction = kwargs.get('direction', self.direction)[0]
is_horizontal = direction in 'rl'
extent = self.width if is_horizontal else self.height
min_move = kwargs.get('min_move', self.min_move)
_offset = kwargs.get('offset', self._offset)
if _offset < min_move * -extent:
new_offset = -extent
elif _offset > min_move * extent:
new_offset = extent
# if new_offset is 0, it wasn't enough to go next/prev
dur = self.anim_move_duration
if new_offset == 0:
dur = self.anim_cancel_duration
# detect edge cases if not looping
len_slides = len(self.slides)
index = self.index
if not self.loop or len_slides == 1:
is_first = (index == 0)
is_last = (index == len_slides - 1)
if direction in 'rt':
towards_prev = (new_offset > 0)
towards_next = (new_offset < 0)
else:
towards_prev = (new_offset < 0)
towards_next = (new_offset > 0)
if (is_first and towards_prev) or (is_last and towards_next):
new_offset = 0
anim = Animation(_offset=new_offset, d=dur, t=self.anim_type)
anim.cancel_all(self)
def _cmp(*l):
if self._skip_slide is not None:
self.index = self._skip_slide
self._skip_slide = None
anim.bind(on_complete=_cmp)
anim.start(self)
def _get_uid(self, prefix='sv'):
return '{0}.{1}'.format(prefix, self.uid)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
touch.ud[self._get_uid('cavoid')] = True
return
if self.disabled:
return True
if self._touch:
return super(Carousel, self).on_touch_down(touch)
Animation.cancel_all(self)
self._touch = touch
uid = self._get_uid()
touch.grab(self)
touch.ud[uid] = {
'mode': 'unknown',
'time': touch.time_start}
self._change_touch_mode_ev = Clock.schedule_once(
self._change_touch_mode, self.scroll_timeout / 1000.)
self.touch_mode_change = False
return True
def on_touch_move(self, touch):
if not self.touch_mode_change:
if self.ignore_perpendicular_swipes and \
self.direction in ('top', 'bottom'):
if abs(touch.oy - touch.y) < self.scroll_distance:
if abs(touch.ox - touch.x) > self.scroll_distance:
self._change_touch_mode()
self.touch_mode_change = True
elif self.ignore_perpendicular_swipes and \
self.direction in ('right', 'left'):
if abs(touch.ox - touch.x) < self.scroll_distance:
if abs(touch.oy - touch.y) > self.scroll_distance:
self._change_touch_mode()
self.touch_mode_change = True
if self._get_uid('cavoid') in touch.ud:
return
if self._touch is not touch:
super(Carousel, self).on_touch_move(touch)
return self._get_uid() in touch.ud
if touch.grab_current is not self:
return True
ud = touch.ud[self._get_uid()]
direction = self.direction[0]
if ud['mode'] == 'unknown':
if direction in 'rl':
distance = abs(touch.ox - touch.x)
else:
distance = abs(touch.oy - touch.y)
if distance > self.scroll_distance:
ev = self._change_touch_mode_ev
if ev is not None:
ev.cancel()
ud['mode'] = 'scroll'
else:
if direction in 'rl':
self._offset += touch.dx
if direction in 'tb':
self._offset += touch.dy
return True
def on_touch_up(self, touch):
if self._get_uid('cavoid') in touch.ud:
return
if self in [x() for x in touch.grab_list]:
touch.ungrab(self)
self._touch = None
ud = touch.ud[self._get_uid()]
if ud['mode'] == 'unknown':
ev = self._change_touch_mode_ev
if ev is not None:
ev.cancel()
super(Carousel, self).on_touch_down(touch)
Clock.schedule_once(partial(self._do_touch_up, touch), .1)
else:
self._start_animation()
else:
if self._touch is not touch and self.uid not in touch.ud:
super(Carousel, self).on_touch_up(touch)
return self._get_uid() in touch.ud
def _do_touch_up(self, touch, *largs):
super(Carousel, self).on_touch_up(touch)
# don't forget about grab event!
for x in touch.grab_list[:]:
touch.grab_list.remove(x)
x = x()
if not x:
continue
touch.grab_current = x
super(Carousel, self).on_touch_up(touch)
touch.grab_current = None
def _change_touch_mode(self, *largs):
if not self._touch:
return
self._start_animation()
uid = self._get_uid()
touch = self._touch
ud = touch.ud[uid]
if ud['mode'] == 'unknown':
touch.ungrab(self)
self._touch = None
super(Carousel, self).on_touch_down(touch)
return
def add_widget(self, widget, index=0, *args, **kwargs):
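        # Each slide is wrapped in a RelativeLayout container so it can be positioned independently of the Carousel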
container = RelativeLayout(
size=self.size, x=self.x - self.width, y=self.y)
container.add_widget(widget)
super(Carousel, self).add_widget(container, index, *args, **kwargs)
if index != 0:
self.slides.insert(index - len(self.slides), widget)
else:
self.slides.append(widget)
def remove_widget(self, widget, *args, **kwargs):
# XXX be careful, the widget.parent refer to the RelativeLayout
# added in add_widget(). But it will break if RelativeLayout
# implementation change.
# if we passed the real widget
slides = self.slides
if widget in slides:
if self.index >= slides.index(widget):
self.index = max(0, self.index - 1)
container = widget.parent
slides.remove(widget)
super(Carousel, self).remove_widget(container, *args, **kwargs)
container.remove_widget(widget)
return
super(Carousel, self).remove_widget(widget, *args, **kwargs)
def clear_widgets(self, children=None, *args, **kwargs):
# `children` must be a list of slides or None
if children is None:
children = self.slides[:]
remove_widget = self.remove_widget
for widget in children:
remove_widget(widget)
super(Carousel, self).clear_widgets()
if __name__ == '__main__':
from kivy.app import App
class Example1(App):
def build(self):
carousel = Carousel(direction='left',
loop=True)
for i in range(4):
src = "http://placehold.it/480x270.png&text=slide-%d&.png" % i
image = Factory.AsyncImage(source=src, fit_mode="contain")
carousel.add_widget(image)
return carousel
Example1().run() | PypiClean |
/Indomielibs-2.0.106.tar.gz/Indomielibs-2.0.106/pyrogram/methods/chats/promote_chat_member.py |
from typing import Union
import pyrogram
from pyrogram import raw, types, errors
class PromoteChatMember:
async def promote_chat_member(
self: "pyrogram.Client",
chat_id: Union[int, str],
user_id: Union[int, str],
privileges: "types.ChatPrivileges" = None,
) -> bool:
"""Promote or demote a user in a supergroup or a channel.
You must be an administrator in the chat for this to work and must have the appropriate admin rights.
Pass False for all boolean parameters to demote a user.
.. include:: /_includes/usable-by/users-bots.rst
Parameters:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
user_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target user.
For a contact that exists in your Telegram address book you can use his phone number (str).
privileges (:obj:`~pyrogram.types.ChatPrivileges`, *optional*):
New user privileges.
Returns:
``bool``: True on success.
Example:
.. code-block:: python
# Promote chat member to admin
await app.promote_chat_member(chat_id, user_id)
"""
chat_id = await self.resolve_peer(chat_id)
user_id = await self.resolve_peer(user_id)
# See Chat.promote_member for the reason of this (instead of setting types.ChatPrivileges() as default arg).
if privileges is None:
privileges = types.ChatPrivileges()
try:
raw_chat_member = (await self.invoke(
raw.functions.channels.GetParticipant(
channel=chat_id,
participant=user_id
)
)).participant
except errors.RPCError:
raw_chat_member = None
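        # Preserve an existing custom admin title (rank), if the user is already an administrator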
rank = None
if isinstance(raw_chat_member, raw.types.ChannelParticipantAdmin):
rank = raw_chat_member.rank
await self.invoke(
raw.functions.channels.EditAdmin(
channel=chat_id,
user_id=user_id,
admin_rights=raw.types.ChatAdminRights(
anonymous=privileges.is_anonymous,
change_info=privileges.can_change_info,
post_messages=privileges.can_post_messages,
edit_messages=privileges.can_edit_messages,
delete_messages=privileges.can_delete_messages,
ban_users=privileges.can_restrict_members,
invite_users=privileges.can_invite_users,
pin_messages=privileges.can_pin_messages,
add_admins=privileges.can_promote_members,
manage_call=privileges.can_manage_video_chats,
other=privileges.can_manage_chat
),
rank=rank or ""
)
)
return True | PypiClean |
/FRCUploader-3.6.2.tar.gz/FRCUploader-3.6.2/frcuploader/consts.py | import os
import pkg_resources
import tbapy
__version__ = pkg_resources.require("FRCUploader")[0].version
# Default Variables
DEBUG = False # DON'T COMMIT THIS LINE IF TRUE
DEFAULT_TAGS = "{}, frcuploader, FIRST, omgrobots, FRC, FIRST Robotics Competition, robots, Robotics, {game}"
MATCH_TYPE = ("qm", "qf", "sf", "f1m")
DEFAULT_DESCRIPTION = """Footage of the {ename} is courtesy of {team}.
Red Alliance ({red1}, {red2}, {red3}) - {redscore}
Blue Alliance ({blue3}, {blue2}, {blue1}) - {bluescore}
To view match schedules and results for this event, visit The Blue Alliance Event Page: https://www.thebluealliance.com/event/{ecode}
Follow us on Twitter (@{twit}) and Facebook ({fb}).
For more information and future event schedules, visit our website: {weblink}
Thanks for watching!"""
NO_TBA_DESCRIPTION = """Footage of the {ename} Event is courtesy of {team}.
Follow us on Twitter (@{twit}) and Facebook ({fb}).
For more information and future event schedules, visit our website: {weblink}
Thanks for watching!"""
CREDITS = """
Uploaded with FRC-YouTube-Uploader (https://github.com/NikhilNarayana/FRC-YouTube-Uploader) by Nikhil Narayana"""
VALID_PRIVACY_STATUSES = ("public", "unlisted", "private")
GAMES = {
"2023": "FIRST ENERGIZE: Charged Up, Charged Up, CHARGED UP",
"2022": "Rapid React, RAPID REACT",
"2021": "FIRST Rise: Infinite Recharge, Rise: INFINITE RECHARGE, INFINITE RECHARGE",
"2020": "FIRST Rise: Infinite Recharge, Rise: INFINITE RECHARGE, INFINITE RECHARGE",
"2019": "FIRST Destination: Deep Space, Destination: Deep Space, Deep Space",
"2018": "FIRST Power Up, FIRST POWER UP",
"2017": "FIRST Steamworks, FIRST STEAMworks",
"2016": "FIRST Stronghold",
"2015": "Recycle Rush",
"2014": "Aerial Assist",
"2013": "Ultimate Ascent",
}
# Extra Stuff
abbrv = "frc"
short_name = "frcuploader"
long_name = "FRC YouTube Uploader"
row_range = "Data!A1:G1"
first_run = True
stop_thread = False
response = None
status = None
error = None
sleep_minutes = 600
retry = 0
youtube = None
tba = tbapy.TBA("wvIxtt5Qvbr2qJtqW7ZsZ4vNppolYy0zMNQduH8LdYA7v2o1myt8ZbEOHAwzRuqf")
trusted = False
sizes = ("bytes", "KB", "MB", "GB", "TB")
cerem = (
"None",
"Opening Ceremonies",
"Alliance Selection",
"Closing Ceremonies",
"Highlight Reel",
)
frc_folder = os.path.join(os.path.expanduser("~"), ".frcuploader")
yt_accounts_folder = os.path.join(frc_folder, "accounts")
youtube_oauth_file = os.path.join(frc_folder, "frc-oauth2-youtube.json")
os.makedirs(yt_accounts_folder, exist_ok=True)
queue_values = os.path.join(frc_folder, "frc_queue_values.txt")
form_values = os.path.join(frc_folder, "frc_form_values.json")
log_file = os.path.join(frc_folder, "frc_log.txt")
rec_formats = (".ts", ".mkv", ".avi", ".mp4", ".flv", ".mov") | PypiClean |
/MezzanineFor1.7-3.1.10.tar.gz/MezzanineFor1.7-3.1.10/mezzanine/conf/forms.py | from __future__ import unicode_literals
from future.builtins import int
from collections import defaultdict
from django import forms
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import urlize
from mezzanine.conf import settings, registry
from mezzanine.conf.models import Setting
FIELD_TYPES = {
bool: forms.BooleanField,
int: forms.IntegerField,
float: forms.FloatField,
}
class SettingsForm(forms.Form):
"""
Form for settings - creates a field for each setting in
``mezzanine.conf`` that is marked as editable.
"""
def __init__(self, *args, **kwargs):
super(SettingsForm, self).__init__(*args, **kwargs)
settings.use_editable()
        # Create a form field for each editable setting based on its type.
for name in sorted(registry.keys()):
setting = registry[name]
if setting["editable"]:
field_class = FIELD_TYPES.get(setting["type"], forms.CharField)
kwargs = {
"label": setting["label"] + ":",
"required": setting["type"] in (int, float),
"initial": getattr(settings, name),
"help_text": self.format_help(setting["description"]),
}
if setting["choices"]:
field_class = forms.ChoiceField
kwargs["choices"] = setting["choices"]
self.fields[name] = field_class(**kwargs)
css_class = field_class.__name__.lower()
self.fields[name].widget.attrs["class"] = css_class
def __iter__(self):
"""
Calculate and apply a group heading to each field and order by the
heading.
"""
fields = list(super(SettingsForm, self).__iter__())
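        # Group fields by the prefix before the first underscore (e.g. "SITE_TITLE" -> "Site")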
group = lambda field: field.name.split("_", 1)[0].title()
misc = _("Miscellaneous")
groups = defaultdict(int)
for field in fields:
groups[group(field)] += 1
for (i, field) in enumerate(fields):
setattr(fields[i], "group", group(field))
if groups[fields[i].group] == 1:
fields[i].group = misc
return iter(sorted(fields, key=lambda x: (x.group == misc, x.group)))
def save(self):
"""
Save each of the settings to the DB.
"""
for (name, value) in self.cleaned_data.items():
setting_obj, created = Setting.objects.get_or_create(name=name)
setting_obj.value = value
setting_obj.save()
def format_help(self, description):
"""
Format the setting's description into HTML.
"""
for bold in ("``", "*"):
parts = []
if description is None:
description = ""
for i, s in enumerate(description.split(bold)):
parts.append(s if i % 2 == 0 else "<b>%s</b>" % s)
description = "".join(parts)
return mark_safe(urlize(description).replace("\n", "<br>")) | PypiClean |
/Mopidy-16x2LCD-0.1.0.tar.gz/Mopidy-16x2LCD-0.1.0/README.rst | ****************************
Mopidy-16x2LCD
****************************
.. image:: https://pypip.in/v/Mopidy-16x2LCD/badge.png
:target: https://pypi.python.org/pypi/Mopidy-16x2LCD/
:alt: Latest PyPI version
.. image:: https://pypip.in/d/Mopidy-16x2LCD/badge.png
:target: https://pypi.python.org/pypi/Mopidy-16x2LCD/
:alt: Number of PyPI downloads
.. image:: https://api.travis-ci.org/spjoe/mopidy-16x2LCD.png?branch=master
:target: https://travis-ci.org/spjoe/mopidy-16x2LCD
:alt: Travis CI build status
.. image:: https://coveralls.io/repos/spjoe/mopidy-16x2LCD/badge.png?branch=master
:target: https://coveralls.io/r/spjoe/mopidy-16x2LCD?branch=master
:alt: Test coverage
A Mopidy frontend that displays the currently playing track and the sound volume on a 16x2 character LCD from Adafruit.
Installation
============
Install by running::
pip install Mopidy-16x2LCD
Project resources
=================
- `Source code <https://github.com/spjoe/mopidy-16x2LCD>`_
- `Issue tracker <https://github.com/spjoe/mopidy-16x2LCD/issues>`_
- `Download development snapshot <https://github.com/spjoe/mopidy-16x2LCD/tarball/master#egg=Mopidy-16x2LCD-dev>`_
Changelog
=========
v0.1.0 - 08.10.2015
----------------------------------------
- Initial release.
| PypiClean |
/django-chuck-0.2.3.tar.gz/django-chuck/build/lib/django_chuck/commands/install_modules.py | from django_chuck.commands.base import BaseCommand
import os
import sys
import shutil
from django_chuck.utils import append_to_file, get_files, get_template_engine, compile_template
from random import choice
class Command(BaseCommand):
help = "Create all modules"
# Which modules shall be installed
modules_to_install = []
# Remember which module were already installed
installed_modules = []
# Remember where we can find which module
module_cache = {}
# Post build actions
post_build_actions = []
def __init__(self):
super(Command, self).__init__()
self.opts.append(("modules", {
"help": "Comma seperated list of module names (can include pip modules)",
"default": "core",
"nargs": "?",
}))
def install_module(self, module_name):
module = self.module_cache.get(module_name, None)
# Module has post build action? Remember it
if module.cfg:
cfg = self.inject_variables_and_functions(module.cfg)
setattr(cfg, "installed_modules", self.installed_modules)
if module.post_build:
self.post_build_actions.append((module.name, module.post_build))
self.print_header("BUILDING " + module.name)
self.installed_modules.append(module)
# For each file in the module dir
for f in get_files(module.dir):
if not "chuck_module.py" in f:
# Absolute path to module file
input_file = f
# Relative path to module file
rel_path_old = f.replace(module.dir, "")
# Relative path to module file with project_name replaced
rel_path_new = f.replace(module.dir, "").replace("project", self.project_name)
# Absolute path to module file in site dir
output_file = f.replace(module.dir, self.site_dir).replace(rel_path_old, rel_path_new)
# Apply templates
print "\t%s -> %s" % (input_file, output_file)
compile_template(input_file, output_file, self.placeholder, self.site_dir, self.project_dir, self.template_engine, self.debug)
if module == "core":
secret_key = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@%^&*(-_=+)') for i in range(50)])
shutil.move(os.path.join(self.site_dir, ".gitignore_" + self.project_name), os.path.join(self.site_dir, ".gitignore"))
append_to_file(os.path.join(self.project_dir, "settings", "common.py"), "\nSECRET_KEY = '" + secret_key + "'\n")
self.installed_modules.append(module.name)
def handle(self, args, cfg):
super(Command, self).handle(args, cfg)
self.installed_modules = []
# Get module cache
self.module_cache = self.get_module_cache()
# Modules to install
self.modules_to_install = self.get_install_modules()
# The template engine that is used to compile the project files
template_engine = get_template_engine(self.site_dir, self.project_dir, cfg.get("template_engine"))
self.placeholder = {
"PROJECT_PREFIX": self.project_prefix,
"PROJECT_NAME": self.project_name,
"SITE_NAME": self.site_name,
"MODULE_BASEDIR": self.module_basedir,
"PYTHON_VERSION": self.python_version,
"PROJECT_BASEDIR": self.project_basedir,
"VIRTUALENV_BASEDIR": self.virtualenv_basedir,
"SERVER_PROJECT_BASEDIR": self.server_project_basedir,
"SERVER_VIRTUALENV_BASEDIR": self.server_virtualenv_basedir,
"EMAIL_DOMAIN": self.email_domain,
"MODULES": ','.join(self.modules_to_install),
}
# Project exists
if os.path.exists(self.site_dir) and not cfg.get("updating"):
self.print_header("EXISTING PROJECT " + self.site_dir)
answer = raw_input("Delete old project dir? <y/N>: ")
if answer.lower() == "y" or answer.lower() == "j":
shutil.rmtree(self.site_dir)
os.makedirs(self.site_dir)
else:
print "Aborting."
sys.exit(0)
# Building a new project
else:
os.makedirs(self.site_dir)
# Clean module list
self.modules_to_install = self.clean_module_list(self.modules_to_install, self.module_cache)
# Install each module
for module in self.modules_to_install:
self.install_module(module)
not_installed_modules = [m for m in self.modules_to_install if not m in self.installed_modules]
if not_installed_modules:
print "\n<<< The following modules cannot be found " + ",".join(not_installed_modules)
self.kill_system()
# we are using notch interactive template engine
# so we want to remove all chuck keywords after successful build
if (self.template_engine == "django_chuck.template.notch_interactive.engine" or not self.template_engine) and\
not self.debug:
for f in get_files(self.site_dir):
template_engine.remove_keywords(f)
# execute post build actions
if self.post_build_actions:
self.print_header("EXECUTING POST BUILD ACTIONS")
for action in self.post_build_actions:
print ">>> " + action[0]
try:
action[1]()
print "\n"
except Exception, e:
print str(e)
self.kill_system() | PypiClean |
/HyFetch-1.4.10.tar.gz/HyFetch-1.4.10/hyfetch/termenv.py | from __future__ import annotations
import os
import platform
import sys
from .color_util import RGB, AnsiMode
class OSCException(Exception):
pass
def unix_detect_ansi_mode() -> AnsiMode | None:
"""
Translated from Termenv's ColorProfile():
https://github.com/muesli/termenv/blob/42ca574de3e99a262e1724d2fb8daa1aea68a5b9/termenv_unix.go#L23
:return: Ansi mode
"""
if not sys.stdout.isatty():
return 'ansi'
    term = os.environ.get('TERM', '')
    color_term = os.environ.get('COLORTERM', '')
if color_term == 'truecolor' or color_term == '24bit':
if term.startswith('screen') and os.environ.get('TERM_PROGRAM') != 'tmux':
return '8bit'
return 'rgb'
elif color_term == 'true' or color_term == 'yes':
return '8bit'
if term == 'xterm-kitty':
return 'rgb'
elif term == 'linux':
return 'ansi'
if '256color' in term:
return 'rgb'
if 'color' in term:
return '8bit'
if 'ansi' in term:
return 'ansi'
return None
def windows_detect_ansi_mode() -> AnsiMode | None:
"""
Translated from Termenv's ColorProfile():
https://github.com/muesli/termenv/blob/42ca574de3e99a262e1724d2fb8daa1aea68a5b9/termenv_windows.go#L13
:return: Ansi mode
"""
if not sys.stdout.isatty():
return 'ansi'
if os.environ.get("ConEmuANSI") == "ON":
return 'rgb'
release, _, build = map(int, platform.version().split('.'))
if build < 10586 or release < 10:
# No ANSI support before Windows 10 build 10586.
if os.environ.get('ANSICON'):
conv = os.environ.get('ANSICON_VER')
if int(conv) < 181:
return 'ansi'
return '8bit'
return 'ansi'
if build < 14931:
# No true color support before build 14931.
return '8bit'
return 'rgb'
def detect_ansi_mode() -> AnsiMode | None:
system = platform.system().lower()
if system.startswith("linux") or system.startswith("darwin"):
return unix_detect_ansi_mode()
if system.startswith("windows"):
return windows_detect_ansi_mode()
return None
def unix_read_osc(seq: int) -> str:
import termios
import tty
import signal
from select import select
# screen/tmux can't support OSC, because they can be connected to multiple
# terminals concurrently.
term = os.environ.get('TERM')
if term.startswith("screen") or term.startswith("tmux"):
raise OSCException("Screen/tmux not supported")
t = sys.stdout
if not t.isatty():
raise OSCException("Not a tty")
fd = sys.stdin.fileno()
# Set raw mode
settings = termios.tcgetattr(fd)
tty.setraw(sys.stdin.fileno())
    # first, send the OSC query, which is ignored by terminals that do not support it
t.write(f"\x1b]{seq};?\x1b\\")
t.flush()
# stdin response timeout should be higher for ssh sessions
timeout = 0.05 if (os.environ.get('SSH_TTY') or os.environ.get('SSH_SESSION')) is None else 0.5
# Wait for input to appear
if not select([sys.stdin], [], [], timeout)[0]:
# Reset terminal back to normal mode (previously set to raw mode)
termios.tcsetattr(fd, termios.TCSADRAIN, settings)
raise OSCException("No response received")
# Read until termination, or if it doesn't terminate, read until 1 second passes
def handler(signum, frame):
raise IOError()
signal.signal(signal.SIGALRM, handler)
signal.setitimer(signal.ITIMER_REAL, timeout, 1)
code = ""
try:
for _ in range(28):
code += sys.stdin.read(1)
# Terminate with sequence terminator [\ or bell ^G
if code.endswith('\x1b\\') or code.endswith('\a'):
break
signal.alarm(0)
except IOError:
pass
# Reset terminal back to normal mode (previously set to raw mode)
termios.tcsetattr(fd, termios.TCSADRAIN, settings)
# Validate output
if not code:
raise OSCException("No response received")
start = f"\x1b]{seq};"
if not code.startswith(start):
raise OSCException("Received response is not an OSC response")
# Strip starting code and termination code
code = code.lstrip(start).rstrip("\x1b\\").rstrip('\a')
return code
def get_background_color() -> RGB | None:
system = platform.system().lower()
if system.startswith("linux") or system.startswith("darwin"):
try:
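            # OSC 11 replies with something like "rgb:rrrr/gggg/bbbb"; keep the leading two hex digits of each channel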
osc = unix_read_osc(11).lstrip("rgb:")
return RGB.from_hex(''.join([v[:2] for v in osc.split('/')]))
except Exception:
return None
if system.startswith("windows"):
return None | PypiClean |
/ADLES-1.4.0.tar.gz/ADLES-1.4.0/adles/vsphere/vsphere_utils.py | import logging
from time import sleep, time
from pyVmomi import vim
from adles.utils import read_json, user_input
SLEEP_INTERVAL = 0.05
LONG_SLEEP = 1.0
class VsphereException(Exception):
pass
def wait_for_task(task, timeout=60.0, pause_timeout=True):
"""
Waits for a single vim.Task to finish and returns its result.
:param task: The task to wait for
:type task: vim.Task
:param float timeout: Number of seconds to wait before terminating task
:param bool pause_timeout: Pause timeout counter while task
is queued on server
:return: Task result information (task.info.result)
:rtype: str or None
"""
if not task: # Check if there's actually a task
logging.error("No task was specified to wait for")
return None
name = str(task.info.descriptionId)
obj = str(task.info.entityName)
wait_time = 0.0
end_time = time() + float(timeout) # Set end time
try:
while True:
if task.info.state == 'success': # It succeeded!
# Return the task result if it was successful
return task.info.result
elif task.info.state == 'error': # It failed...
logging.error("Error during task %s on object '%s': %s",
name, obj, str(task.info.error.msg))
return None
elif time() > end_time: # Check if it has exceeded the timeout
logging.error("Task %s timed out after %s seconds",
name, str(wait_time))
task.CancelTask() # Cancel the task since we've timed out
return None
elif task.info.state == 'queued':
sleep(LONG_SLEEP) # Sleep longer if it's queued up on system
# Don't count queue time against the timeout
if pause_timeout is True:
end_time += LONG_SLEEP
wait_time += LONG_SLEEP
else:
# Wait a bit so we don't waste resources checking state
sleep(SLEEP_INTERVAL)
wait_time += SLEEP_INTERVAL
except vim.fault.NoPermission as e:
logging.error("Permission denied for task %s on %s: need privilege %s",
name, obj, e.privilegeId)
except vim.fault.TaskInProgress as e:
logging.error("Cannot complete task %s: "
"task %s is already in progress on %s",
name, e.task.info.name, obj)
except vim.fault.InvalidPowerState as e:
logging.error("Cannot complete task %s: "
"%s is in invalid power state %s",
name, obj, e.existingState)
except vim.fault.InvalidState as e:
logging.error("Cannot complete task %s: "
"invalid state for %s\n%s", name, obj, str(e))
except vim.fault.CustomizationFault:
logging.error("Cannot complete task %s: "
"invalid customization for %s", name, obj)
except vim.fault.VmConfigFault:
logging.error("Cannot complete task %s: "
"invalid configuration for VM %s", name, obj)
except vim.fault.InvalidName as e:
logging.error("Cannot complete task %s for object %s: "
"name '%s' is not valid", name, obj, e.name)
except vim.fault.DuplicateName as e:
logging.error("Cannot complete task %s for %s: "
"there is a duplicate named %s", name, obj, e.name)
except vim.fault.InvalidDatastore as e:
logging.error("Cannot complete task %s for %s: "
"invalid Datastore '%s'", name, obj, e.datastore)
except vim.fault.AlreadyExists:
logging.error("Cannot complete task %s: "
"%s already exists", name, obj)
except vim.fault.NotFound:
logging.error("Cannot complete task %s: "
"%s does not exist", name, obj)
except vim.fault.ResourceInUse:
logging.error("Cannot complete task %s: "
"resource %s is in use", name, obj)
return None
# This line allows calling "<task>.wait(<params>)"
# instead of "wait_for_task(task, params)"
#
# This works because the implicit first argument
# to a class method call in Python is the instance
vim.Task.wait = wait_for_task # Inject into vim.Task class
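# e.g. result = some_task.wait(timeout=120) is equivalent to wait_for_task(some_task, timeout=120)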
# From: list_dc_datastore_info in pyvmomi-community-samples
def get_datastore_info(ds_obj):
"""
Gets a human-readable summary of a Datastore.
:param ds_obj: The datastore to get information on
:type ds_obj: vim.Datastore
:return: The datastore's information
:rtype: str
"""
if not ds_obj:
logging.error("No Datastore was given to get_datastore_info")
return ""
from adles.utils import sizeof_fmt
info_string = "\n"
summary = ds_obj.summary
ds_capacity = summary.capacity
ds_freespace = summary.freeSpace
ds_uncommitted = summary.uncommitted if summary.uncommitted else 0
ds_provisioned = ds_capacity - ds_freespace + ds_uncommitted
ds_overp = ds_provisioned - ds_capacity
ds_overp_pct = (ds_overp * 100) / ds_capacity if ds_capacity else 0
info_string += "Name : %s\n" % summary.name
info_string += "URL : %s\n" % summary.url
info_string += "Capacity : %s\n" % sizeof_fmt(ds_capacity)
info_string += "Free Space : %s\n" % sizeof_fmt(ds_freespace)
info_string += "Uncommitted : %s\n" % sizeof_fmt(ds_uncommitted)
info_string += "Provisioned : %s\n" % sizeof_fmt(ds_provisioned)
if ds_overp > 0:
info_string += "Over-provisioned : %s / %s %%\n" \
% (sizeof_fmt(ds_overp), ds_overp_pct)
info_string += "Hosts : %d\n" % len(ds_obj.host)
info_string += "Virtual Machines : %d" % len(ds_obj.vm)
return info_string
vim.Datastore.get_info = get_datastore_info
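# Inject into vim.Datastore so datastore objects can call "<datastore>.get_info()"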
def make_vsphere(filename=None):
"""
Creates a vSphere object using either a JSON file or by prompting the user.
:param str filename: Name of JSON file with connection info
:return: vSphere object
:rtype: :class:`Vsphere`
"""
from adles.vsphere.vsphere_class import Vsphere
if filename is not None:
info = read_json(filename)
if info is None:
raise VsphereException("Failed to create vSphere object")
return Vsphere(username=info.get("user"),
password=info.get("pass"),
hostname=info.get("host"),
port=info.get("port", 443),
datacenter=info.get("datacenter"),
datastore=info.get("datastore"))
else:
logging.info("Enter information to connect to the vSphere environment")
datacenter = input("Datacenter : ")
datastore = input("Datastore : ")
return Vsphere(datacenter=datacenter, datastore=datastore)
def resolve_path(server, thing, prompt=""):
"""
This is a hacked together script utility to get folders or VMs.
:param server: Vsphere instance
:type server: :class:`Vsphere`
:param str thing: String name of thing to get (folder | vm)
:param str prompt: Message to display
:return: (thing, thing name)
:rtype: tuple(vimtype, str)
"""
# TODO: use pathlib
from adles.vsphere.vm import VM
if thing.lower() == "vm":
get = server.get_vm
elif thing.lower() == "folder":
get = server.get_folder
else:
logging.error("Invalid thing passed to resolve_path: %s", thing)
raise ValueError
res = user_input("Name of or path to %s %s: " % (thing, prompt), thing,
lambda x: server.find_by_inv_path("vm/" + x)
if '/' in x else get(x))
if thing.lower() == "vm":
return VM(vm=res[0]), res[1]
else:
return res
def is_folder(obj):
"""
Checks if object is a vim.Folder.
:param obj: The object to check
:return: If the object is a folder
:rtype: bool
"""
return hasattr(obj, "childEntity")
def is_vm(obj):
"""
Checks if object is a vim.VirtualMachine.
:param obj: The object to check
:return: If the object is a VM
:rtype: bool
"""
return hasattr(obj, "summary") | PypiClean |
/Django_Lookout-0.1.2-py3-none-any.whl/lookout/report_schemas/hpkp.py | from django.utils import timezone, dateparse
from .generic import GenericReportSchema
from .legacy import LegacyReportSchema
__all__ = ['HPKPReportSchema', 'LegacyHPKPReportSchema']
class HPKPReportSchema (GenericReportSchema):
type = 'hpkp'
name = "HTTP Public Key Pinning Report"
description = "A report sent by a user agent when a HPKP policy is violated."
body_schema = {
'required': ['hostname'],
'properties': {
'hostname': {
'description': "The hostname to which the user agent made the original request that failed pin validation.",
'type': 'string',
'anyOf': [
{'format': 'hostname'},
{'format': 'ipv4'},
{'format': 'ipv6'}
]
},
'port': {
'description': "The port to which the user agent made the original request that failed pin validation.",
'type': 'integer'
},
'noted-hostname': {
'description': "The hostname that the user agent noted when it noted the known pinned host.",
'type': 'string',
'anyOf': [
{'format': 'hostname'},
{'format': 'ipv4'},
{'format': 'ipv6'}
]
},
'include-subdomains': {
'description': "Whether or not the user agent has noted the includeSubDomains directive for the known pinned host.",
'type': 'boolean'
},
'served-certificate-chain': {
'description': "The certificate chain, as served by the known pinned host during TLS session setup.",
'type': 'array',
'minItems': 1,
'items': {
'type': 'string'
}
},
'validated-certificate-chain': {
'description': "The certificate chain, as constructed by the user agent during certificate chain verification.",
'type': 'array',
'minItems': 1,
'items': {
'type': 'string'
}
},
'known-pins': {
'description': "The pins that the user agent has noted for the known pinned host.",
'type': 'array',
'items': {
'type': 'string',
'pattern': '^(.+)=(?:\'|")(.+)(?:\'|")$'
}
},
'effective-expiration-date': {
'description': "The effective expiration date for the noted pins.",
'type': 'string',
'format': 'date-time'
}
}
}
class LegacyHPKPReportSchema (LegacyReportSchema):
generic_class = HPKPReportSchema
@classmethod
def normalize (cls, report_data):
""" Adapts the legacy HPKP schema to the HTTP Reporting API schema """
# The number of milliseconds between ``date-time`` and now
		age = int((timezone.now() - dateparse.parse_datetime(report_data.pop('date-time'))).total_seconds() * 1000)
return cls.generic_class, {
'type': cls.type,
'age': age,
'url': 'https://{}/'.format(report_data.get('hostname', '')),
'body': report_data
} | PypiClean |